Example no. 1
func TestDropIndex(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR);
CREATE INDEX foo on t.kv (v);
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")

	status, i, err := tableDesc.FindIndexByName("foo")
	if err != nil {
		t.Fatal(err)
	}
	if status != sqlbase.DescriptorActive {
		t.Fatal("Index 'foo' is not active.")
	}
	indexPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc.ID, tableDesc.Indexes[i].ID)
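	// Every inserted row contributes exactly one KV pair to index foo, so the
	// scan over the index span below should return three entries for the three
	// rows inserted above.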

	indexStartKey := roachpb.Key(indexPrefix)
	indexEndKey := indexStartKey.PrefixEnd()
	if kvs, err := kvDB.Scan(indexStartKey, indexEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 3; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if _, err := sqlDB.Exec(`DROP INDEX t.kv@foo`); err != nil {
		t.Fatal(err)
	}

	if kvs, err := kvDB.Scan(indexStartKey, indexEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 0; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv")

	if _, _, err := tableDesc.FindIndexByName("foo"); err == nil {
		t.Fatalf("table descriptor still contains index after index is dropped")
	}
}
Example no. 2
// Test that table names are not treated as case sensitive by the name cache.
func TestTableNameNotCaseSensitive(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, kvDB, cleanup := sqlutils.SetupServer(t)
	defer cleanup()
	leaseManager := s.LeaseManager().(*LeaseManager)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := sqlDB.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Check that we can get the table under a differently cased name.
	lease := leaseManager.tableNames.get(tableDesc.ParentID, "tEsT", s.Clock())
	if lease == nil {
		t.Fatalf("no name cache entry")
	}
	if err := leaseManager.Release(lease); err != nil {
		t.Fatal(err)
	}
}
// Test that abruptly closing a pgwire connection releases all leases held by
// that session.
func TestPGWireConnectionCloseReleasesLeases(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	url, cleanupConn := sqlutils.PGUrl(t, s.ServingAddr(), security.RootUser, "SetupServer")
	defer cleanupConn()
	conn, err := pq.Open(url.String())
	if err != nil {
		t.Fatal(err)
	}
	ex := conn.(driver.Execer)
	if _, err := ex.Exec("CREATE DATABASE test", nil); err != nil {
		t.Fatal(err)
	}
	if _, err := ex.Exec("CREATE TABLE test.t (i INT PRIMARY KEY)", nil); err != nil {
		t.Fatal(err)
	}
	// Start a txn so leases are accumulated by queries.
	if _, err := ex.Exec("BEGIN", nil); err != nil {
		t.Fatal(err)
	}
	// Get a table lease.
	if _, err := ex.Exec("SELECT * FROM test.t", nil); err != nil {
		t.Fatal(err)
	}
	// Abruptly close the connection.
	if err := conn.Close(); err != nil {
		t.Fatal(err)
	}
	// Verify that there are no leases held.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	lm := s.LeaseManager().(*LeaseManager)
	// Looking for a table state validates that there used to be a lease on the
	// table.
	ts := lm.findTableState(tableDesc.ID, false /* create */)
	if ts == nil {
		t.Fatal("table state not found")
	}
	ts.mu.Lock()
	leases := ts.active.data
	ts.mu.Unlock()
	if len(leases) != 1 {
		t.Fatalf("expected one lease, found: %d", len(leases))
	}
	// Wait for the lease to be released.
	util.SucceedsSoon(t, func() error {
		ts.mu.Lock()
		refcount := ts.active.data[0].refcount
		ts.mu.Unlock()
		if refcount != 0 {
			return errors.Errorf(
				"expected lease to be unused, found refcount: %d", refcount)
		}
		return nil
	})
}
Example no. 4
// Test that we fail to lease a table that was marked for deletion.
func TestCantLeaseDeletedTable(testingT *testing.T) {
	defer leaktest.AfterTest(testingT)()

	var mu sync.Mutex
	clearSchemaChangers := false

	ctx, _ := createTestServerContext()
	ctx.TestingKnobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			SyncSchemaChangersFilter: func(tscc csql.TestingSchemaChangerCollection) {
				mu.Lock()
				defer mu.Unlock()
				if clearSchemaChangers {
					tscc.ClearSchemaChangers()
				}
			},
		},
		SQLSchemaChangeManager: &csql.SchemaChangeManagerTestingKnobs{
			AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
		},
	}

	t := newLeaseTest(testingT, &ctx)
	defer t.cleanup()

	sql := `
CREATE DATABASE test;
CREATE TABLE test.t(a INT PRIMARY KEY);
`
	_, err := t.db.Exec(sql)
	if err != nil {
		t.Fatal(err)
	}

	// Block schema changers so that the table we're about to DROP is not actually
	// dropped; it will be left in a "deleted" state.
	mu.Lock()
	clearSchemaChangers = true
	mu.Unlock()

	// DROP the table
	_, err = t.db.Exec(`DROP TABLE test.t`)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure we can't get a lease on the descriptor.
	tableDesc := sqlbase.GetTableDescriptor(t.kvDB, "test", "t")
	// Try to acquire at a bogus version to make sure we don't get back a lease
	// we already had.
	_, err = t.acquire(1, tableDesc.ID, tableDesc.Version+1)
	if !testutils.IsError(err, "table is being deleted") {
		t.Fatalf("got a different error than expected: %s", err)
	}
}
// TestAddingFKs checks the behavior of a table in the non-public `ADD` state.
// Being non-public, it should not be visible to clients, and is therefore
// assumed to be empty (e.g. by foreign key checks), since no one could have
// written to it yet.
func TestAddingFKs(t *testing.T) {
	defer leaktest.AfterTest(t)()

	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
		CREATE DATABASE t;
		CREATE TABLE t.products (id INT PRIMARY KEY);
		INSERT INTO t.products VALUES (1), (2);
		CREATE TABLE t.orders (id INT PRIMARY KEY, product INT REFERENCES t.products, INDEX (product));
	`); err != nil {
		t.Fatal(err)
	}

	// Step the referencing table back to the ADD state.
	ordersDesc := sqlbase.GetTableDescriptor(kvDB, "t", "orders")
	ordersDesc.State = sqlbase.TableDescriptor_ADD
	ordersDesc.Version++
	if err := kvDB.Put(
		sqlbase.MakeDescMetadataKey(ordersDesc.ID),
		sqlbase.WrapDescriptor(ordersDesc),
	); err != nil {
		t.Fatal(err)
	}

	// Generally a referenced table needs to lookup referencing tables to check
	// FKs during delete operations, but referencing tables in the ADD state are
	// given special treatment.
	if _, err := sqlDB.Exec(`DELETE FROM t.products`); err != nil {
		t.Fatal(err)
	}

	// Client should not see the orders table.
	if _, err := sqlDB.Exec(
		`SELECT * FROM t.orders`,
	); !testutils.IsError(err, "table is being added") {
		t.Fatal(err)
	}
}
Example no. 6
// Tests that a name cache entry with an expired lease is not returned.
func TestNameCacheEntryDoesntReturnExpiredLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, kvDB, cleanup := sqlutils.SetupServer(t)
	defer cleanup()
	leaseManager := s.LeaseManager().(*LeaseManager)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := sqlDB.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Check the assumptions this test makes: that there is a cache entry
	// (with a valid lease).
	lease := leaseManager.tableNames.get(tableDesc.ParentID, "test", s.Clock())
	if lease == nil {
		t.Fatalf("no name cache entry")
	}
	if err := leaseManager.Release(lease); err != nil {
		t.Fatal(err)
	}
	// Advance the clock to expire the lease.
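	// The name cache entry acquired above is only valid for roughly
	// LeaseDuration, so jumping the clock by 2*LeaseDuration should leave it
	// expired.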
	s.Clock().SetMaxOffset(10 * LeaseDuration)
	s.Clock().Update(s.Clock().Now().Add(int64(2*LeaseDuration), 0))

	// Check that the name no longer resolves.
	if leaseManager.tableNames.get(tableDesc.ParentID, "test", s.Clock()) != nil {
		t.Fatalf("name resolves when it shouldn't")
	}
}
// TestAcquireFreshestFromStoreRaces runs
// LeaseManager.acquireFreshestFromStore() in parallel to test for races.
func TestAcquireFreshestFromStoreRaces(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	leaseManager := s.LeaseManager().(*LeaseManager)

	if _, err := db.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	var wg sync.WaitGroup
	numRoutines := 10
	wg.Add(numRoutines)
	for i := 0; i < numRoutines; i++ {
		go func() {
			defer wg.Done()
			err := kvDB.Txn(func(txn *client.Txn) error {
				lease, err := leaseManager.acquireFreshestFromStore(txn, tableDesc.ID)
				if err != nil {
					return err
				}
				if err := leaseManager.Release(lease); err != nil {
					return err
				}
				return nil
			})
			if err != nil {
				t.Error(err)
			}
		}()
	}
	wg.Wait()
}
func TestOperationsWithColumnMutation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations.
	params, _ := createTestServerParams()
	params.Knobs.SQLSchemaChangeManager = &csql.SchemaChangeManagerTestingKnobs{
		AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, i CHAR DEFAULT 'i');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	mTest := mutationTest{
		T:         t,
		kvDB:      kvDB,
		sqlDB:     sqlDB,
		tableDesc: tableDesc,
	}

	starQuery := `SELECT * FROM t.test`
	// Run the tests for both states.
	for _, state := range []sqlbase.DescriptorMutation_State{sqlbase.DescriptorMutation_DELETE_ONLY, sqlbase.DescriptorMutation_WRITE_ONLY} {
		// Init table to start state.
		if _, err := sqlDB.Exec(`TRUNCATE TABLE t.test`); err != nil {
			t.Fatal(err)
		}
		initRows := [][]string{{"a", "z", "q"}}
		for _, row := range initRows {
			if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ($1, $2, $3)`, row[0], row[1], row[2]); err != nil {
				t.Fatal(err)
			}
		}
		// Check that the table only contains the initRows.
		_ = mTest.checkQueryResponse(starQuery, initRows)

		// Add column "i" as a mutation.
		mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})
		// A direct read of column "i" fails.
		if _, err := sqlDB.Query(`SELECT i FROM t.test`); err == nil {
			t.Fatalf("Read succeeded despite column being in %v state", sqlbase.DescriptorMutation{State: state})
		}
		// The table only contains columns "k" and "v".
		_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "z"}})

		// The column backfill uses Put instead of CPut because it depends on
		// an INSERT of a column in the WRITE_ONLY state failing. These two
		// tests guarantee that.

		// Inserting a row into the table while specifying column "i" results in an error.
		if _, err := sqlDB.Exec(`INSERT INTO t.test (k, v, i) VALUES ('b', 'y', 'i')`); !testutils.IsError(err, `column "i" does not exist`) {
			t.Fatal(err)
		}
		// Repeating the same without specifying the columns results in a different error.
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ('b', 'y', 'i')`); !testutils.IsError(err, "INSERT error: table t.test has 2 columns but 3 values were supplied") {
			t.Fatal(err)
		}
		// Make column "i" live so that it is read.
		mTest.makeMutationsActive()
		// Check that we can read all the rows and columns.
		_ = mTest.checkQueryResponse(starQuery, initRows)

		var afterInsert, afterUpdate, afterDelete [][]string
		if state == sqlbase.DescriptorMutation_DELETE_ONLY {
			// The default value 'i' for column "i" is not written.
			afterInsert = [][]string{{"a", "z", "q"}, {"c", "x", "NULL"}}
			// Update is a noop for column "i".
			afterUpdate = [][]string{{"a", "u", "q"}, {"c", "x", "NULL"}}
			// Delete also deletes column "i".
			afterDelete = [][]string{{"c", "x", "NULL"}}
		} else {
			// The default value 'i' for column "i" is written.
			afterInsert = [][]string{{"a", "z", "q"}, {"c", "x", "i"}}
			// Update is a noop for column "i".
			afterUpdate = [][]string{{"a", "u", "q"}, {"c", "x", "i"}}
			// Delete also deletes column "i".
			afterDelete = [][]string{{"c", "x", "i"}}
		}
		// Make column "i" a mutation.
		mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})
		// Insert a row into the table.
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ('c', 'x')`); err != nil {
			t.Fatal(err)
		}
		// Make column "i" live so that it is read.
		mTest.makeMutationsActive()
		// Notice that the default value of "i" is only written when the
		// descriptor is in the WRITE_ONLY state.
		_ = mTest.checkQueryResponse(starQuery, afterInsert)

		// The column backfill uses Put instead of CPut because it depends on
		// an UPDATE of a column in the WRITE_ONLY state failing. This test
		// guarantees that.

		// Make column "i" a mutation.
		mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})
		// Updating column "i" for a row fails.
		if _, err := sqlDB.Exec(`UPDATE t.test SET (v, i) = ('u', 'u') WHERE k = 'a'`); !testutils.IsError(err, `column "i" does not exist`) {
			t.Fatal(err)
		}
		// Make column "i" live so that it is read.
		mTest.makeMutationsActive()
		// The above failed update was a noop.
		_ = mTest.checkQueryResponse(starQuery, afterInsert)

		// Make column "i" a mutation.
		mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})
		// Update a row without specifying the mutation column "i".
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = 'u' WHERE k = 'a'`); err != nil {
			t.Fatal(err)
		}
		// Make column "i" live so that it is read.
		mTest.makeMutationsActive()
		// The update to column "v" is seen; there is no effect on column "i".
		_ = mTest.checkQueryResponse(starQuery, afterUpdate)

		// Make column "i" a mutation.
		mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})
		// Delete row "a".
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = 'a'`); err != nil {
			t.Fatal(err)
		}
		// Make column "i" live so that it is read.
		mTest.makeMutationsActive()
		// Row "a" is deleted. numVals is the number of non-NULL values seen,
		// or the number of KV values belonging to all the rows in the table
		// excluding row "a" since it's deleted.
		numVals := mTest.checkQueryResponse(starQuery, afterDelete)
		// Check that there are no hidden KV values for row "a",
		// and column "i" for row "a" was deleted.
		mTest.checkTableSize(numVals)
	}

	// Check that a mutation can only be inserted with an explicit mutation state and direction.
	tableDesc = mTest.tableDesc
	tableDesc.Mutations = []sqlbase.DescriptorMutation{{}}
	if err := tableDesc.Validate(); !testutils.IsError(err, "mutation in state UNKNOWN, direction NONE, and no column/index descriptor") {
		t.Fatal(err)
	}
	tableDesc.Mutations = []sqlbase.DescriptorMutation{{Descriptor_: &sqlbase.DescriptorMutation_Column{Column: &tableDesc.Columns[len(tableDesc.Columns)-1]}}}
	tableDesc.Columns = tableDesc.Columns[:len(tableDesc.Columns)-1]
	if err := tableDesc.Validate(); !testutils.IsError(err, "mutation in state UNKNOWN, direction NONE, col i, id 3") {
		t.Fatal(err)
	}
	tableDesc.Mutations[0].State = sqlbase.DescriptorMutation_DELETE_ONLY
	if err := tableDesc.Validate(); !testutils.IsError(err, "mutation in state DELETE_ONLY, direction NONE, col i, id 3") {
		t.Fatal(err)
	}
	tableDesc.Mutations[0].State = sqlbase.DescriptorMutation_UNKNOWN
	tableDesc.Mutations[0].Direction = sqlbase.DescriptorMutation_DROP
	if err := tableDesc.Validate(); !testutils.IsError(err, "mutation in state UNKNOWN, direction DROP, col i, id 3") {
		t.Fatal(err)
	}
}
Example no. 9
func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	_, sqlDB, kvDB, cleanup := sqlutils.SetupServer(t)
	defer cleanup()

	if _, err := sqlDB.Exec(`
		CREATE DATABASE test;
		CREATE TABLE test.t (a INT PRIMARY KEY, b INT, c INT, d INT, INDEX bc (b, c));
		INSERT INTO test.t VALUES (1, 10, 11, 12), (2, 20, 21, 22), (3, 30, 31, 32);
		INSERT INTO test.t VALUES (4, 60, 61, 62), (5, 50, 51, 52), (6, 40, 41, 42);
	`); err != nil {
		t.Fatal(err)
	}

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	ts := TableReaderSpec{
		Table:         *td,
		IndexIdx:      0,
		Reverse:       false,
		Spans:         nil,
		Filter:        Expression{Expr: "$2 != 21"}, // c != 21
		OutputColumns: []uint32{0, 3},               // a, d
	}

	txn := client.NewTxn(context.Background(), *kvDB)

	out := &testingReceiver{}
	tr, err := newTableReader(&ts, txn, out, parser.EvalContext{})
	if err != nil {
		t.Fatal(err)
	}
	tr.run()
	if out.err != nil {
		t.Fatal(out.err)
	}
	if !out.closed {
		t.Fatalf("output rowReceiver not closed")
	}
	expected := "[[1 12] [3 32] [4 62] [5 52] [6 42]]"
	if fmt.Sprintf("%s", out.rows) != expected {
		t.Errorf("invalid results: %s, expected %s'", out.rows, expected)
	}

	// Read using the bc index
	var span roachpb.Span
	span.Key = roachpb.Key(sqlbase.MakeIndexKeyPrefix(td.ID, td.Indexes[0].ID))
	span.EndKey = append(span.Key, encoding.EncodeVarintAscending(nil, 50)...)
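	// The span covers index entries with b < 50; combined with the reverse scan
	// and the "b != 30" filter below, it should yield the rows with b = 40, 20, 10.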

	ts = TableReaderSpec{
		Table:         *td,
		IndexIdx:      1,
		Reverse:       true,
		Spans:         []TableReaderSpan{{Span: span}},
		Filter:        Expression{Expr: "$1 != 30"}, // b != 30
		OutputColumns: []uint32{0, 2},               // a, c
	}
	out = &testingReceiver{}
	tr, err = newTableReader(&ts, txn, out, parser.EvalContext{})
	if err != nil {
		t.Fatal(err)
	}
	tr.run()
	if out.err != nil {
		t.Fatal(out.err)
	}
	if !out.closed {
		t.Fatalf("output rowReceiver not closed")
	}
	expected = "[[6 41] [2 21] [1 11]]"
	if fmt.Sprintf("%s", out.rows) != expected {
		t.Errorf("invalid results: %s, expected %s'", out.rows, expected)
	}
}
Example no. 10
func TestDropTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, kvDB := setup(t)
	defer cleanup(s, sqlDB)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR);
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
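	// The name metadata key for "kv" lives under its parent database's ID; here
	// we assume database "t" is the first user-created descriptor, so its ID is
	// keys.MaxReservedDescID+1.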
	nameKey := sqlbase.MakeNameMetadataKey(keys.MaxReservedDescID+1, "kv")
	gr, err := kvDB.Get(nameKey)

	if err != nil {
		t.Fatal(err)
	}

	if !gr.Exists() {
		t.Fatalf("Name entry %q does not exist", nameKey)
	}

	descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))

	// Add a zone config for the table.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	zoneKey := sqlbase.MakeZoneKey(tableDesc.ID)
	if gr, err := kvDB.Get(zoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("zone config entry not found")
	}

	tablePrefix := keys.MakeTablePrefix(uint32(tableDesc.ID))
	tableStartKey := roachpb.Key(tablePrefix)
	tableEndKey := tableStartKey.PrefixEnd()
	if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 6; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil {
		t.Fatal(err)
	}

	// Test that the deleted table cannot be used. This prevents regressions where
	// name -> descriptor ID caches might make this statement erroneously work.
	if _, err := sqlDB.Exec(`SELECT * FROM t.kv`); !testutils.IsError(err, `table "t.kv" does not exist`) {
		t.Fatalf("different error than expected: %s", err)
	}

	if kvs, err := kvDB.Scan(tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 0; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if gr, err := kvDB.Get(descKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(nameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table namekey still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(zoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("zone config entry still exists after the table is dropped")
	}
}
Example no. 11
func TestPurgeOldLeases(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// We're going to block gossip so it doesn't fire at a random time and clear
	// up the leases we're artificially setting up.
	gossipSem := make(chan struct{}, 1)
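	// gossipSem acts as a binary semaphore: once the test fills it below, the
	// GossipUpdateEvent callback blocks on its send until the channel is drained
	// again, which pauses the gossip-driven lease cleanup.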
	serverParams := testingshim.TestServerParams{
		Knobs: base.TestingKnobs{
			SQLLeaseManager: &LeaseManagerTestingKnobs{
				GossipUpdateEvent: func(cfg config.SystemConfig) {
					gossipSem <- struct{}{}
					<-gossipSem
				},
			},
		},
	}
	s, db, kvDB, cleanup := sqlutils.SetupServerWithParams(t, serverParams)
	defer cleanup()
	leaseManager := s.LeaseManager().(*LeaseManager)
	// Block gossip.
	gossipSem <- struct{}{}
	defer func() {
		// Unblock gossip.
		<-gossipSem
	}()

	if _, err := db.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	var leases []*LeaseState
	err := kvDB.Txn(func(txn *client.Txn) error {
		for i := 0; i < 3; i++ {
			lease, err := leaseManager.acquireFreshestFromStore(txn, tableDesc.ID)
			if err != nil {
				t.Fatal(err)
			}
			leases = append(leases, lease)
			if err := leaseManager.Release(lease); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	ts := leaseManager.findTableState(tableDesc.ID, false, nil)
	if numLeases := getNumLeases(ts); numLeases != 3 {
		t.Fatalf("found %d leases instead of 3", numLeases)
	}

	if err := ts.purgeOldLeases(
		kvDB, false, 1 /* minVersion */, leaseManager.LeaseStore); err != nil {
		t.Fatal(err)
	}

	if numLeases := getNumLeases(ts); numLeases != 1 {
		t.Fatalf("found %d leases instead of 1", numLeases)
	}
	ts.mu.Lock()
	correctLease := ts.active.data[0] == leases[2]
	ts.mu.Unlock()
	if !correctLease {
		t.Fatalf("wrong lease survived purge")
	}
}
Example no. 12
func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	_, sqlDB, kvDB, cleanup := sqlutils.SetupServer(t)
	defer cleanup()

	// Create a table where each row is:
	//
	//  |     a    |     b    |         sum         |         s           |
	//  |-----------------------------------------------------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |
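	//
	// For example, row 25 should come out as (2, 5, 7, 'two-five'), assuming
	// RowEnglishFn spells out the row number digit by digit.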

	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td.ID, td.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	testCases := []struct {
		spec     TableReaderSpec
		expected string
	}{
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "$2 < 5 AND $1 != 3"}, // sum < 5 && b != 3
				OutputColumns: []uint32{0, 1},
			},
			expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]",
		},
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "$2 < 5 AND $1 != 3"},
				OutputColumns: []uint32{3}, // s
				HardLimit:     4,
			},
			expected: "[['one'] ['two'] ['four'] ['one-zero']]",
		},
		{
			spec: TableReaderSpec{
				IndexIdx:      1,
				Reverse:       true,
				Spans:         []TableReaderSpan{makeIndexSpan(4, 6)},
				Filter:        Expression{Expr: "$0 < 3"}, // sum < 8
				OutputColumns: []uint32{0, 1},
				SoftLimit:     1,
			},
			expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]",
		},
	}

	for _, c := range testCases {
		ts := c.spec
		ts.Table = *td

		txn := client.NewTxn(context.Background(), *kvDB)

		out := &RowBuffer{}
		tr, err := newTableReader(&ts, txn, out, &parser.EvalContext{})
		if err != nil {
			t.Fatal(err)
		}
		tr.Run(nil)
		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s'", result, c.expected)
		}
	}
}
// TestSchemaChangeCommandsWithPendingMutations tests how schema change
// commands behave when they are referencing schema elements that are
// mutations that are not yet live.
func TestSchemaChangeCommandsWithPendingMutations(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations.
	params, _ := createTestServerParams()
	params.Knobs.SQLSchemaChangeManager = &csql.SchemaChangeManagerTestingKnobs{
		AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (a CHAR PRIMARY KEY, b CHAR, c CHAR, INDEX foo (c));
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	mt := mutationTest{
		T:         t,
		kvDB:      kvDB,
		sqlDB:     sqlDB,
		tableDesc: tableDesc,
	}

	// Test CREATE INDEX in the presence of mutations.

	// Add index DROP mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`CREATE INDEX foo ON t.test (c)`); !testutils.IsError(err, `index "foo" being dropped, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`CREATE INDEX foo ON t.test (c)`); !testutils.IsError(err, `duplicate index name: "foo"`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()
	// Add column DROP mutation "b"
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`CREATE INDEX bar ON t.test (b)`); !testutils.IsError(err, `index "bar" contains unknown column "b"`) {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()
	// "b" is being added.
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	// An index referencing a column mutation that is being added
	// is allowed to be added.
	if _, err := sqlDB.Exec(`CREATE INDEX bar ON t.test (b)`); err != nil {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// Test DROP INDEX in the presence of mutations.

	// Add index DROP mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	// Noop.
	if _, err := sqlDB.Exec(`DROP INDEX t.test@foo`); err != nil {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()
	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`DROP INDEX t.test@foo`); !testutils.IsError(err, `index "foo" in the middle of being added, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()
	// Test ALTER TABLE ADD/DROP column in the presence of mutations.

	// Add column DROP mutation "b"
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD b CHAR`); !testutils.IsError(err, `column "b" being dropped, try again later`) {
		t.Fatal(err)
	}
	// Noop.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP b`); err != nil {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()
	// "b" is being added.
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD b CHAR`); !testutils.IsError(err, `duplicate column name: "b"`) {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP b`); !testutils.IsError(err, `column "b" in the middle of being added, try again later`) {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// Test ALTER TABLE ADD CONSTRAINT in the presence of mutations.

	// Add index DROP mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT foo UNIQUE (c)`); !testutils.IsError(err, `index "foo" being dropped, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()
	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT foo UNIQUE (c)`); !testutils.IsError(err, `duplicate index name: "foo"`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()
	// Add column DROP mutation "b".
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT bar UNIQUE (b)`); !testutils.IsError(err, `index "bar" contains unknown column "b"`) {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()
	// "b" is being added.
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	// Noop.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD CONSTRAINT bar UNIQUE (b)`); err != nil {
		t.Fatal(err)
	}
	// Make "b" live.
	mt.makeMutationsActive()

	// Test DROP CONSTRAINT in the presence of mutations.

	// Add index mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	// Noop.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP CONSTRAINT foo`); err != nil {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()
	// "foo" is being added.
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP CONSTRAINT foo`); !testutils.IsError(err, `constraint "foo" in the middle of being added, try again later`) {
		t.Fatal(err)
	}
	// Make "foo" live.
	mt.makeMutationsActive()

	// Rename column/index, while index is under mutation.

	// Add index mutation "foo".
	mt.writeIndexMutation("foo", sqlbase.DescriptorMutation{})
	if _, err := sqlDB.Exec(`ALTER INDEX t.test@foo RENAME to ufo`); err != nil {
		mt.Fatal(err)
	}
	if _, err := sqlDB.Exec(`ALTER TABLE t.test RENAME COLUMN c TO d`); err != nil {
		mt.Fatal(err)
	}
	// The mutation in the table descriptor has changed and we would like
	// to update our copy to make it live.
	mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Make "ufo" live.
	mt.makeMutationsActive()
	// The index has been renamed to ufo, and the column to d.
	_ = mt.checkQueryResponse("SHOW INDEXES FROM t.test", [][]string{{"test", "primary", "true", "1", "a", "ASC", "false"}, {"test", "ufo", "false", "1", "d", "ASC", "false"}})

	// Rename column under mutation works properly.

	// Add column mutation "b".
	mt.writeColumnMutation("b", sqlbase.DescriptorMutation{})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test RENAME COLUMN b TO e`); err != nil {
		mt.Fatal(err)
	}

	// The mutation in the table descriptor has changed and we would like
	// to update our copy to make it live.
	mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Make column "e" live.
	mt.makeMutationsActive()
	// Column "b" has been renamed to "e".
	_ = mt.checkQueryResponse("SHOW COLUMNS FROM t.test", [][]string{{"a", "STRING", "false", "NULL"}, {"d", "STRING", "true", "NULL"}, {"e", "STRING", "true", "NULL"}})

	// Try to change column defaults while column is under mutation.
	mt.writeColumnMutation("e", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_ADD})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ALTER COLUMN e SET DEFAULT 'a'`); !testutils.IsError(
		err, `column "e" in the middle of being added`) {
		t.Fatal(err)
	}
	mt.makeMutationsActive()
	mt.writeColumnMutation("e", sqlbase.DescriptorMutation{Direction: sqlbase.DescriptorMutation_DROP})
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ALTER COLUMN e SET DEFAULT 'a'`); !testutils.IsError(
		err, `column "e" in the middle of being dropped`) {
		t.Fatal(err)
	}
	mt.makeMutationsActive()
}
Example no. 14
// Test schema change purge failure doesn't leave DB in a bad state.
func TestSchemaChangePurgeFailure(t *testing.T) {
	defer leaktest.AfterTest(t)()

	params, _ := createTestServerParams()
	// Disable the async schema changer.
	var enableAsyncSchemaChanges uint32
	attempts := 0
	params.Knobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			SchemaChangersStartBackfillNotification: func() error {
				attempts++
				// Return a deadline exceeded error during the second attempt,
				// which tries to clean up the schema change.
				if attempts == 2 {
					return errors.New("context deadline exceeded")
				}
				return nil
			},
		},
		SQLSchemaChangeManager: &csql.SchemaChangeManagerTestingKnobs{
			AsyncSchemaChangerExecNotification: func() error {
				if enable := atomic.LoadUint32(&enableAsyncSchemaChanges); enable == 0 {
					return errors.New("async schema changes are disabled")
				}
				return nil
			},
			// Speed up evaluation of async schema changes so that it
			// processes a purged schema change quickly.
			AsyncSchemaChangerExecQuickly: true,
		},
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
		t.Fatal(err)
	}

	// Bulk insert.
	maxValue := csql.IndexBackfillChunkSize + 1
	insert := fmt.Sprintf(`INSERT INTO t.test VALUES (%d, %d)`, 0, maxValue)
	for i := 1; i <= maxValue; i++ {
		insert += fmt.Sprintf(` ,(%d, %d)`, i, maxValue-i)
	}
	if _, err := sqlDB.Exec(insert); err != nil {
		t.Fatal(err)
	}

	// Add a row with a duplicate value for v; the bulk insert above already
	// wrote v = maxValue at k = 0, so building a unique index on v must fail.
	if _, err := sqlDB.Exec(
		`INSERT INTO t.test VALUES ($1, $2)`, maxValue+1, maxValue,
	); err != nil {
		t.Fatal(err)
	}

	// A schema change that violates integrity constraints.
	if _, err := sqlDB.Exec(
		"CREATE UNIQUE INDEX foo ON t.test (v)",
	); !testutils.IsError(err, "violates unique constraint") {
		t.Fatal(err)
	}
	// The deadline exceeded error in the schema change purge results in no
	// retry attempts of the purge.
	if e := 2; attempts != e {
		t.Fatalf("%d retries, despite allowing only (schema change + reverse) = %d", attempts, e)
	}

	// The index doesn't exist
	if _, err := sqlDB.Query(
		`SELECT v from t.test@foo`,
	); !testutils.IsError(err, "index .* not found") {
		t.Fatal(err)
	}

	// Read table descriptor.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// There is still a mutation hanging off of it.
	if e := 1; len(tableDesc.Mutations) != e {
		t.Fatalf("the table has %d instead of %d mutations", len(tableDesc.Mutations), e)
	}
	// The mutation is for a DROP.
	if tableDesc.Mutations[0].Direction != sqlbase.DescriptorMutation_DROP {
		t.Fatalf("the table has mutation %v instead of a DROP", tableDesc.Mutations[0])
	}

	// There is still some garbage index data that needs to be purged. All the
	// rows from k = 0 to k = maxValue have index values. The k = maxValue + 1
	// row with the conflict doesn't contain an index value.
	numGarbageValues := csql.IndexBackfillChunkSize
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	if kvs, err := kvDB.Scan(tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 1*(maxValue+2) + numGarbageValues; len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Enable async schema change processing to ensure that it cleans up the
	// above garbage left behind.
	atomic.StoreUint32(&enableAsyncSchemaChanges, 1)

	util.SucceedsSoon(t, func() error {
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		if len(tableDesc.Mutations) > 0 {
			return errors.Errorf("%d mutations remaining", len(tableDesc.Mutations))
		}
		return nil
	})

	// No garbage left behind.
	numGarbageValues = 0
	if kvs, err := kvDB.Scan(tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 1*(maxValue+2) + numGarbageValues; len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}
}
Example no. 15
// Test that schema change backfills are not affected by various operations
// that run simultaneously.
func TestRaceWithBackfill(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("#7628")

	var backfillNotification chan bool
	params, _ := createTestServerParams()
	// Disable asynchronous schema change execution to allow synchronous path
	// to trigger start of backfill notification.
	params.Knobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			SchemaChangersStartBackfillNotification: func() error {
				if backfillNotification != nil {
					// Close channel to notify that the backfill has started.
					close(backfillNotification)
				}
				return nil
			},
		},
		SQLSchemaChangeManager: &csql.SchemaChangeManagerTestingKnobs{
			AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
		},
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14'));
CREATE UNIQUE INDEX vidx ON t.test (v);
`); err != nil {
		t.Fatal(err)
	}

	// Bulk insert.
	maxValue := 4000
	insert := fmt.Sprintf(`INSERT INTO t.test VALUES (%d, %d)`, 0, maxValue)
	for i := 1; i <= maxValue; i++ {
		insert += fmt.Sprintf(` ,(%d, %d)`, i, maxValue-i)
	}
	if _, err := sqlDB.Exec(insert); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	// number of keys == 3 * number of rows; 2 column families and 1 index entry
	// for each row.
	if kvs, err := kvDB.Scan(tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 3 * (maxValue + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Run some schema changes with operations.

	// Add column.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4')",
		maxValue,
		4,
		backfillNotification)

	// Drop column.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"ALTER TABLE t.test DROP pi",
		maxValue,
		3,
		backfillNotification)

	// Add index.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"CREATE UNIQUE INDEX foo ON t.test (v)",
		maxValue,
		4,
		backfillNotification)

	// Drop index.
	backfillNotification = make(chan bool)
	runSchemaChangeWithOperations(
		t,
		sqlDB,
		kvDB,
		"DROP INDEX t.test@vidx",
		maxValue,
		3,
		backfillNotification)

	// Verify that the index foo over v is consistent, and that column x has
	// been backfilled properly.
	rows, err := sqlDB.Query(`SELECT v, x from t.test@foo`)
	if err != nil {
		t.Fatal(err)
	}

	count := 0
	for ; rows.Next(); count++ {
		var val int
		var x float64
		if err := rows.Scan(&val, &x); err != nil {
			t.Errorf("row %d scan failed: %s", count, err)
			continue
		}
		if count != val {
			t.Errorf("e = %d, v = %d", count, val)
		}
		if 1.4 != x {
			t.Errorf("e = %f, v = %f", 1.4, x)
		}
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	eCount := maxValue + 1
	if eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}

	// Verify that a table delete in the middle of a backfill works properly.
	// The backfill will terminate in the middle, and the delete will
	// successfully delete all the table data.
	//
	// This test could be made its own test but is placed here to speed up the
	// testing.

	backfillNotification = make(chan bool)
	// Run the schema change in a separate goroutine.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		// Start schema change that eventually runs a backfill.
		if _, err := sqlDB.Exec("CREATE UNIQUE INDEX bar ON t.test (v)"); err != nil {
			t.Error(err)
		}
		wg.Done()
	}()

	// Wait until the schema change backfill starts.
	<-backfillNotification

	// Wait for a short bit to ensure that the backfill has likely progressed
	// and written some data, but not long enough that the backfill has
	// completed.
	time.Sleep(10 * time.Millisecond)

	if _, err := sqlDB.Exec("DROP TABLE t.test"); err != nil {
		t.Fatal(err)
	}

	// Wait until the schema change is done.
	wg.Wait()

	// Ensure that the table data has been deleted.
	if kvs, err := kvDB.Scan(tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if len(kvs) != 0 {
		t.Fatalf("expected %d key value pairs, but got %d", 0, len(kvs))
	}
}
Example no. 16
// Run a particular schema change and run some OLTP operations in parallel, as
// soon as the schema change starts executing its backfill.
func runSchemaChangeWithOperations(
	t *testing.T,
	sqlDB *gosql.DB,
	kvDB *client.DB,
	schemaChange string,
	maxValue int,
	keyMultiple int,
	backfillNotification chan bool,
) {
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Run the schema change in a separate goroutine.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		start := timeutil.Now()
		// Start schema change that eventually runs a backfill.
		if _, err := sqlDB.Exec(schemaChange); err != nil {
			t.Error(err)
		}
		t.Logf("schema change %s took %v", schemaChange, timeutil.Since(start))
		wg.Done()
	}()

	// Wait until the schema change backfill starts.
	<-backfillNotification

	// Run a variety of operations during the backfill.

	// Grabbing a schema change lease on the table will fail, disallowing
	// another schema change from being simultaneously executed.
	sc := csql.NewSchemaChangerForTesting(tableDesc.ID, 0, 0, *kvDB, nil)
	if l, err := sc.AcquireLease(); err == nil {
		t.Fatalf("schema change lease acquisition on table %d succeeded: %v", tableDesc.ID, l)
	}

	// Update some rows.
	var updatedKeys []int
	for i := 0; i < 10; i++ {
		k := rand.Intn(maxValue)
		v := maxValue + i + 1
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = $2 WHERE k = $1`, k, v); err != nil {
			t.Fatal(err)
		}
		updatedKeys = append(updatedKeys, k)
	}

	// Update the modified rows back to their original values.
	for _, k := range updatedKeys {
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = $2 WHERE k = $1`, k, maxValue-k); err != nil {
			t.Fatal(err)
		}
	}

	// Delete some rows.
	deleteStartKey := rand.Intn(maxValue - 10)
	for i := 0; i < 10; i++ {
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, deleteStartKey+i); err != nil {
			t.Fatal(err)
		}
	}
	// Reinsert deleted rows.
	for i := 0; i < 10; i++ {
		k := deleteStartKey + i
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, k, maxValue-k); err != nil {
			t.Fatal(err)
		}
	}

	// Insert some new rows.
	numInserts := 10
	for i := 0; i < numInserts; i++ {
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES($1, $2)`, maxValue+i+1, maxValue+i+1); err != nil {
			t.Fatal(err)
		}
	}

	wg.Wait() // for schema change to complete.

	// Verify the number of keys left behind in the table to validate schema
	// change operations.
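	// keyMultiple is assumed to be the number of KV pairs each row carries once
	// the schema change completes (one more per row after adding a column or
	// index, one fewer after dropping one).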
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	if kvs, err := kvDB.Scan(tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := keyMultiple * (maxValue + numInserts + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}

	// Delete the rows inserted.
	for i := 0; i < numInserts; i++ {
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = $1`, maxValue+i+1); err != nil {
			t.Fatal(err)
		}
	}
}
Example no. 17
func TestAsyncSchemaChanger(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable synchronous schema change execution so the asynchronous schema
	// changer executes all schema changes.
	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			SyncSchemaChangersFilter: func(tscc csql.TestingSchemaChangerCollection) {
				tscc.ClearSchemaChangers()
			},
		},
		SQLSchemaChangeManager: &csql.SchemaChangeManagerTestingKnobs{
			AsyncSchemaChangerExecQuickly: true,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// A long-running schema change operation runs through
	// a state machine that increments the version by 3.
	expectedVersion := tableDesc.Version + 3

	// Run a schema change.
	if _, err := sqlDB.Exec(`
CREATE INDEX foo ON t.test (v)
`); err != nil {
		t.Fatal(err)
	}

	retryOpts := retry.Options{
		InitialBackoff: 20 * time.Millisecond,
		MaxBackoff:     200 * time.Millisecond,
		Multiplier:     2,
	}

	// Wait until index is created.
	for r := retry.Start(retryOpts); r.Next(); {
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		if len(tableDesc.Indexes) == 1 {
			break
		}
	}

	// Ensure that the indexes have been created.
	mTest := mutationTest{
		T:         t,
		kvDB:      kvDB,
		sqlDB:     sqlDB,
		tableDesc: tableDesc,
	}
	indexQuery := `SELECT v FROM t.test@foo`
	_ = mTest.checkQueryResponse(indexQuery, [][]string{{"b"}, {"d"}})

	// Ensure that the version has been incremented.
	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion := tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}

	// Apply a schema change that only sets the UpVersion bit.
	expectedVersion = newVersion + 1

	if _, err := sqlDB.Exec(`
ALTER INDEX t.test@foo RENAME TO ufo
`); err != nil {
		t.Fatal(err)
	}

	for r := retry.Start(retryOpts); r.Next(); {
		// Ensure that the version gets incremented.
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		name := tableDesc.Indexes[0].Name
		if name != "ufo" {
			t.Fatalf("bad index name %s", name)
		}
		newVersion = tableDesc.Version
		if newVersion == expectedVersion {
			break
		}
	}

	// Run many schema changes simultaneously and check
	// that they all get executed.
	count := 5
	for i := 0; i < count; i++ {
		cmd := fmt.Sprintf(`CREATE INDEX foo%d ON t.test (v)`, i)
		if _, err := sqlDB.Exec(cmd); err != nil {
			t.Fatal(err)
		}
	}
	// Wait until indexes are created.
	for r := retry.Start(retryOpts); r.Next(); {
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		if len(tableDesc.Indexes) == count+1 {
			break
		}
	}
	for i := 0; i < count; i++ {
		indexQuery := fmt.Sprintf(`SELECT v FROM t.test@foo%d`, i)
		_ = mTest.checkQueryResponse(indexQuery, [][]string{{"b"}, {"d"}})
	}
}
Example no. 18
func TestSchemaChangeProcess(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()

	params, _ := createTestServerParams()
	// Disable external processing of mutations.
	params.Knobs.SQLSchemaChangeManager = &csql.SchemaChangeManagerTestingKnobs{
		AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	var id = sqlbase.ID(keys.MaxReservedDescID + 2)
	var node = roachpb.NodeID(2)
	stopper := stop.NewStopper()
	leaseMgr := csql.NewLeaseManager(0, *kvDB, hlc.NewClock(hlc.UnixNano), csql.LeaseManagerTestingKnobs{}, stopper)
	defer stopper.Stop()
	changer := csql.NewSchemaChangerForTesting(id, 0, node, *kvDB, leaseMgr)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo(v));
INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd');
`); err != nil {
		t.Fatal(err)
	}

	// Read table descriptor for version.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	expectedVersion := tableDesc.Version

	desc, err := changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	newVersion := tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err := changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that MaybeIncrementVersion increments the version
	// correctly.
	expectedVersion++
	tableDesc.UpVersion = true
	if err := kvDB.Put(
		sqlbase.MakeDescMetadataKey(tableDesc.ID),
		sqlbase.WrapDescriptor(tableDesc),
	); err != nil {
		t.Fatal(err)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", desc.GetTable())
	}
	desc, err = changer.MaybeIncrementVersion()
	if err != nil {
		t.Fatal(err)
	}
	tableDesc = desc.GetTable()
	savedTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in returned desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	newVersion = savedTableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version in saved desc; e = %d, v = %d", expectedVersion, newVersion)
	}
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if !isDone {
		t.Fatalf("table expected to not have an outstanding schema change: %v", tableDesc)
	}

	// Check that RunStateMachineBeforeBackfill doesn't do anything
	// if there are no mutations queued.
	if err := changer.RunStateMachineBeforeBackfill(); err != nil {
		t.Fatal(err)
	}

	tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
	newVersion = tableDesc.Version
	if newVersion != expectedVersion {
		t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
	}

	// Check that RunStateMachineBeforeBackfill functions properly.
	expectedVersion = tableDesc.Version
	// Make a copy of the index for use in a mutation.
	index := protoutil.Clone(&tableDesc.Indexes[0]).(*sqlbase.IndexDescriptor)
	index.Name = "bar"
	index.ID = tableDesc.NextIndexID
	tableDesc.NextIndexID++
	changer = csql.NewSchemaChangerForTesting(id, tableDesc.NextMutationID, node, *kvDB, leaseMgr)
	tableDesc.Mutations = append(tableDesc.Mutations, sqlbase.DescriptorMutation{
		Descriptor_: &sqlbase.DescriptorMutation_Index{Index: index},
		Direction:   sqlbase.DescriptorMutation_ADD,
		State:       sqlbase.DescriptorMutation_DELETE_ONLY,
		MutationID:  tableDesc.NextMutationID,
	})
	tableDesc.NextMutationID++

	// Run state machine in both directions.
	for _, direction := range []sqlbase.DescriptorMutation_Direction{sqlbase.DescriptorMutation_ADD, sqlbase.DescriptorMutation_DROP} {
		tableDesc.Mutations[0].Direction = direction
		expectedVersion++
		if err := kvDB.Put(
			sqlbase.MakeDescMetadataKey(tableDesc.ID),
			sqlbase.WrapDescriptor(tableDesc),
		); err != nil {
			t.Fatal(err)
		}
		// The expected end state.
		expectedState := sqlbase.DescriptorMutation_WRITE_ONLY
		if direction == sqlbase.DescriptorMutation_DROP {
			expectedState = sqlbase.DescriptorMutation_DELETE_ONLY
		}
		// Run two times to ensure idempotency of operations.
		for i := 0; i < 2; i++ {
			if err := changer.RunStateMachineBeforeBackfill(); err != nil {
				t.Fatal(err)
			}

			tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
			newVersion = tableDesc.Version
			if newVersion != expectedVersion {
				t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion)
			}
			state := tableDesc.Mutations[0].State
			if state != expectedState {
				t.Fatalf("bad state; e = %d, v = %d", expectedState, state)
			}
		}
	}
	// RunStateMachineBeforeBackfill() doesn't complete the schema change.
	isDone, err = changer.IsDone()
	if err != nil {
		t.Fatal(err)
	}
	if isDone {
		t.Fatalf("table expected to have an outstanding schema change: %v", tableDesc)
	}

}
Example no. 19
func TestJoinReader(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Create a table where each row is:
	//
	//  |     a    |     b    |         sum         |         s           |
	//  |-----------------------------------------------------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |
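	//
	// The join reader looks up table rows by primary key, so an input row such
	// as (aFn(2), bFn(2)) = (0, 2) should map to the full row (0, 2, 2, 'two').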

	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	testCases := []struct {
		spec     JoinReaderSpec
		input    [][]parser.Datum
		expected string
	}{
		{
			spec: JoinReaderSpec{
				OutputColumns: []uint32{0, 1, 2},
			},
			input: [][]parser.Datum{
				{aFn(2), bFn(2)},
				{aFn(5), bFn(5)},
				{aFn(10), bFn(10)},
				{aFn(15), bFn(15)},
			},
			expected: "[[0 2 2] [0 5 5] [1 0 1] [1 5 6]]",
		},
		{
			spec: JoinReaderSpec{
				Filter:        Expression{Expr: "$2 <= 5"}, // sum <= 5
				OutputColumns: []uint32{3},
			},
			input: [][]parser.Datum{
				{aFn(1), bFn(1)},
				{aFn(25), bFn(25)},
				{aFn(5), bFn(5)},
				{aFn(21), bFn(21)},
				{aFn(34), bFn(34)},
				{aFn(13), bFn(13)},
				{aFn(51), bFn(51)},
				{aFn(50), bFn(50)},
			},
			expected: "[['one'] ['five'] ['two-one'] ['one-three'] ['five-zero']]",
		},
	}
	for _, c := range testCases {
		js := c.spec
		js.Table = *td

		txn := client.NewTxn(context.Background(), *kvDB)

		in := &RowBuffer{}
		for _, row := range c.input {
			encRow := make(sqlbase.EncDatumRow, len(row))
			for i, d := range row {
				encRow[i].SetDatum(sqlbase.ColumnType_INT, d)
			}
			in.rows = append(in.rows, encRow)
		}

		out := &RowBuffer{}
		jr, err := newJoinReader(&js, txn, in, out, &parser.EvalContext{})
		if err != nil {
			t.Fatal(err)
		}

		jr.Run(nil)

		if out.err != nil {
			t.Fatal(out.err)
		}
		if !in.done {
			t.Fatal("joinReader stopped accepting rows")
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s'", result, c.expected)
		}
	}
}
func TestOperationsWithIndexMutation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations.
	params, _ := createTestServerParams()
	params.Knobs.SQLSchemaChangeManager = &csql.SchemaChangeManagerTestingKnobs{
		AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v));
`); err != nil {
		t.Fatal(err)
	}

	// read table descriptor
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	mTest := mutationTest{
		T:         t,
		kvDB:      kvDB,
		sqlDB:     sqlDB,
		tableDesc: tableDesc,
	}

	starQuery := `SELECT * FROM t.test`
	indexQuery := `SELECT v FROM t.test@foo`
	// See the effect of the operations depending on the state.
	for _, state := range []sqlbase.DescriptorMutation_State{sqlbase.DescriptorMutation_DELETE_ONLY, sqlbase.DescriptorMutation_WRITE_ONLY} {
		// Init table with some entries.
		if _, err := sqlDB.Exec(`TRUNCATE TABLE t.test`); err != nil {
			t.Fatal(err)
		}
		initRows := [][]string{{"a", "z"}, {"b", "y"}}
		for _, row := range initRows {
			if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ($1, $2)`, row[0], row[1]); err != nil {
				t.Fatal(err)
			}
		}
		_ = mTest.checkQueryResponse(starQuery, initRows)
		// Index foo is visible.
		_ = mTest.checkQueryResponse(indexQuery, [][]string{{"y"}, {"z"}})

		// Index foo is invisible once it's a mutation.
		mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: state})
		if _, err := sqlDB.Query(indexQuery); !testutils.IsError(err, `index "foo" not found`) {
			t.Fatal(err)
		}

		// Insert a new entry.
		if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ('c', 'x')`); err != nil {
			t.Fatal(err)
		}
		_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "z"}, {"b", "y"}, {"c", "x"}})

		// Make index "foo" live so that we can read it.
		mTest.makeMutationsActive()
		if state == sqlbase.DescriptorMutation_DELETE_ONLY {
			// "x" didn't get added to the index.
			_ = mTest.checkQueryResponse(indexQuery, [][]string{{"y"}, {"z"}})
		} else {
			// "x" got added to the index.
			_ = mTest.checkQueryResponse(indexQuery, [][]string{{"x"}, {"y"}, {"z"}})
		}

		// Make "foo" a mutation.
		mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: state})
		// Update.
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = 'w' WHERE k = 'c'`); err != nil {
			t.Fatal(err)
		}
		// Update "v" to its current value "z" in row "a".
		if _, err := sqlDB.Exec(`UPDATE t.test SET v = 'z' WHERE k = 'a'`); err != nil {
			t.Fatal(err)
		}
		_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "z"}, {"b", "y"}, {"c", "w"}})

		// Make index "foo" live so that we can read it.
		mTest.makeMutationsActive()
		if state == sqlbase.DescriptorMutation_DELETE_ONLY {
			// updating "x" -> "w" is a noop on the index,
			// updating "z" -> "z" results in "z" being deleted from the index.
			_ = mTest.checkQueryResponse(indexQuery, [][]string{{"y"}, {"z"}})
		} else {
			// updating "x" -> "w" results in the index updating from "x" -> "w",
			// updating "z" -> "z" is a noop on the index.
			_ = mTest.checkQueryResponse(indexQuery, [][]string{{"w"}, {"y"}, {"z"}})
		}

		// Make "foo" a mutation.
		mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: state})
		// Delete row "b".
		if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = 'b'`); err != nil {
			t.Fatal(err)
		}
		_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "z"}, {"c", "w"}})

		// Make index "foo" live so that we can read it.
		mTest.makeMutationsActive()
		// Deleting row "b" deletes "y" from the index.
		if state == sqlbase.DescriptorMutation_DELETE_ONLY {
			mTest.checkQueryResponse(indexQuery, [][]string{{"z"}})
		} else {
			mTest.checkQueryResponse(indexQuery, [][]string{{"w"}, {"z"}})
		}
	}

	// Check that a mutation can only be inserted with an explicit mutation state.
	tableDesc = mTest.tableDesc
	tableDesc.Mutations = []sqlbase.DescriptorMutation{{Descriptor_: &sqlbase.DescriptorMutation_Index{Index: &tableDesc.Indexes[len(tableDesc.Indexes)-1]}}}
	tableDesc.Indexes = tableDesc.Indexes[:len(tableDesc.Indexes)-1]
	if err := tableDesc.Validate(); !testutils.IsError(err, "mutation in state UNKNOWN, direction NONE, index foo, id 2") {
		t.Fatal(err)
	}
}
// TestOperationsWithUniqueColumnMutation tests all the operations while an
// index mutation refers to a column mutation.
func TestOperationsWithUniqueColumnMutation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// The descriptor changes made must have an immediate effect
	// so disable leases on tables.
	defer csql.TestDisableTableLeases()()
	// Disable external processing of mutations.
	params, _ := createTestServerParams()
	params.Knobs.SQLSchemaChangeManager = &csql.SchemaChangeManagerTestingKnobs{
		AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	// Create a table with column i and an index on v and i.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, i CHAR, INDEX foo (i, v));
`); err != nil {
		t.Fatal(err)
	}

	// read table descriptor
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	mTest := mutationTest{
		T:         t,
		kvDB:      kvDB,
		sqlDB:     sqlDB,
		tableDesc: tableDesc,
	}

	starQuery := `SELECT * FROM t.test`
	indexQuery := `SELECT i FROM t.test@foo`
	// Run the tests for both states for a column and an index.
	for _, state := range []sqlbase.DescriptorMutation_State{
		sqlbase.DescriptorMutation_DELETE_ONLY,
		sqlbase.DescriptorMutation_WRITE_ONLY,
	} {
		for _, idxState := range []sqlbase.DescriptorMutation_State{
			sqlbase.DescriptorMutation_DELETE_ONLY,
			sqlbase.DescriptorMutation_WRITE_ONLY,
		} {
			// Skip the impossible case where the column is in the DELETE_ONLY
			// state while the index is in the WRITE_ONLY state.
			if state == sqlbase.DescriptorMutation_DELETE_ONLY &&
				idxState == sqlbase.DescriptorMutation_WRITE_ONLY {
				continue
			}
			// Init table to start state.
			if _, err := sqlDB.Exec(`TRUNCATE TABLE t.test`); err != nil {
				t.Fatal(err)
			}
			initRows := [][]string{{"a", "z", "q"}, {"b", "y", "r"}}
			for _, row := range initRows {
				if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ($1, $2, $3)`, row[0], row[1], row[2]); err != nil {
					t.Fatal(err)
				}
			}
			// Check that the table only contains the initRows.
			_ = mTest.checkQueryResponse(starQuery, initRows)

			// Add index "foo" as a mutation.
			mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: idxState})
			// Make column "i" a mutation.
			mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})

			// Insert a row into the table.
			if _, err := sqlDB.Exec(`INSERT INTO t.test VALUES ('c', 'x')`); err != nil {
				t.Error(err)
			}

			// Make column "i" and index "foo" live.
			mTest.makeMutationsActive()
			// column "i" has no entry.
			_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "z", "q"}, {"b", "y", "r"}, {"c", "x", "NULL"}})
			if idxState == sqlbase.DescriptorMutation_DELETE_ONLY {
				// No index entry for row "c"
				_ = mTest.checkQueryResponse(indexQuery, [][]string{{"q"}, {"r"}})
			} else {
				// Index entry for row "c"
				_ = mTest.checkQueryResponse(indexQuery, [][]string{{"NULL"}, {"q"}, {"r"}})
			}

			// Add index "foo" as a mutation.
			mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: idxState})
			// Make column "i" a mutation.
			mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})

			// Updating column "i" for a row fails.
			if _, err := sqlDB.Exec(`UPDATE t.test SET (v, i) = ('u', 'u') WHERE k = 'a'`); !testutils.IsError(err, `column "i" does not exist`) {
				t.Error(err)
			}

			// Update a row without specifying the mutation column "i".
			if _, err := sqlDB.Exec(`UPDATE t.test SET v = 'u' WHERE k = 'a'`); err != nil {
				t.Error(err)
			}
			// Make column "i" and index "foo" live.
			mTest.makeMutationsActive()

			// The update to column "v" is seen; there is no effect on column "i".
			_ = mTest.checkQueryResponse(starQuery, [][]string{{"a", "u", "q"}, {"b", "y", "r"}, {"c", "x", "NULL"}})
			if idxState == sqlbase.DescriptorMutation_DELETE_ONLY {
				// Index entry for row "a" is deleted.
				_ = mTest.checkQueryResponse(indexQuery, [][]string{{"r"}})
			} else {
				// No change in index "foo"
				_ = mTest.checkQueryResponse(indexQuery, [][]string{{"NULL"}, {"q"}, {"r"}})
			}

			// Add index "foo" as a mutation.
			mTest.writeIndexMutation("foo", sqlbase.DescriptorMutation{State: idxState})
			// Make column "i" a mutation.
			mTest.writeColumnMutation("i", sqlbase.DescriptorMutation{State: state})

			// Delete row "b".
			if _, err := sqlDB.Exec(`DELETE FROM t.test WHERE k = 'b'`); err != nil {
				t.Error(err)
			}
			// Make column "i" and index "foo" live.
			mTest.makeMutationsActive()
			// Row "b" is deleted. numVals is the number of non-NULL values seen,
			// or the number of KV values belonging to all the rows in the table
			// excluding row "b" since it's deleted.
			numVals := mTest.checkQueryResponse(starQuery, [][]string{{"a", "u", "q"}, {"c", "x", "NULL"}})
			// idxVals is the number of index values seen.
			var idxVals int
			if idxState == sqlbase.DescriptorMutation_DELETE_ONLY {
				// Index entry for row "b" is deleted.
				idxVals = mTest.checkQueryResponse(indexQuery, [][]string{})
			} else {
				// Index entry for row "b" is deleted. idxVals doesn't account for
				// the NULL value seen.
				idxVals = mTest.checkQueryResponse(indexQuery, [][]string{{"NULL"}, {"q"}})
				// Increment idxVals to account for the NULL value seen above.
				idxVals++
			}
			// Check that there are no hidden KV values for row "b", and column
			// "i" for row "b" was deleted. Also check that the index values are
			// all accounted for.
			mTest.checkTableSize(numVals + idxVals)
		}
	}
}
Example n. 22
// TestSchemaChangeReverseMutations tests that schema changes get reversed
// correctly when one of them violates a constraint.
func TestSchemaChangeReverseMutations(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	// Disable synchronous schema change processing so that the mutations get
	// processed asynchronously.
	var enableAsyncSchemaChanges uint32
	params.Knobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			SyncSchemaChangersFilter: func(tscc csql.TestingSchemaChangerCollection) {
				tscc.ClearSchemaChangers()
			},
		},
		SQLSchemaChangeManager: &csql.SchemaChangeManagerTestingKnobs{
			AsyncSchemaChangerExecNotification: func() error {
				if enable := atomic.LoadUint32(&enableAsyncSchemaChanges); enable == 0 {
					return errors.New("async schema changes are disabled")
				}
				return nil
			},
			AsyncSchemaChangerExecQuickly: true,
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	// Create a k-v table.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT PRIMARY KEY, v INT);
`); err != nil {
		t.Fatal(err)
	}

	// Add some data.
	maxValue := csql.IndexBackfillChunkSize + 1
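	// maxValue exceeds the backfill chunk size, presumably so that the
	// backfills below span more than one chunk.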
	insert := fmt.Sprintf(`INSERT INTO t.test VALUES (%d, %d)`, 0, maxValue)
	for i := 1; i <= maxValue; i++ {
		insert += fmt.Sprintf(` ,(%d, %d)`, i, maxValue-i)
	}
	if _, err := sqlDB.Exec(insert); err != nil {
		t.Fatal(err)
	}

	// Add a column 'a' that is NOT NULL (plus a nullable column 'c'). This
	// schema change doesn't return an error only because we've turned off the
	// synchronous execution path; it will eventually fail when run by the
	// asynchronous path.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD a INT NOT NULL, ADD c INT`); err != nil {
		t.Fatal(err)
	}

	// Add an index over a column that will be purged. This index will
	// eventually not get added.
	if _, err := sqlDB.Exec(`CREATE UNIQUE INDEX idx_a ON t.test (a)`); err != nil {
		t.Fatal(err)
	}

	// The purge of column 'a' doesn't influence these schema changes.

	// Dropping column 'v' moves along just fine. The constraint 'foo' will not
	// be enforced because 'c' never gets added.
	if _, err := sqlDB.Exec(
		`ALTER TABLE t.test DROP v, ADD CONSTRAINT foo UNIQUE (c)`,
	); err != nil {
		t.Fatal(err)
	}

	// Adding unique column 'b' moves along, creating column 'b' and the index
	// on it.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD b INT UNIQUE`); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
	if e := 7; e != len(tableDesc.Mutations) {
		t.Fatalf("e = %d, v = %d", e, len(tableDesc.Mutations))
	}

	// Enable async schema change processing.
	atomic.StoreUint32(&enableAsyncSchemaChanges, 1)

	// Wait until all the mutations have been processed.
	var rows *gosql.Rows
	expectedCols := []string{"k", "b"}
	util.SucceedsSoon(t, func() error {
		// Read table descriptor.
		tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test")
		if len(tableDesc.Mutations) > 0 {
			return errors.Errorf("%d mutations remaining", len(tableDesc.Mutations))
		}

		// Verify that t.test has the expected data. Read the table data while
		// ensuring that the correct table lease is in use.
		var err error
		rows, err = sqlDB.Query(`SELECT * from t.test`)
		if err != nil {
			t.Fatal(err)
		}
		cols, err := rows.Columns()
		if err != nil {
			t.Fatal(err)
		}

		// Ensure that sql is using the correct table lease.
		if len(cols) != len(expectedCols) {
			return errors.Errorf("incorrect columns: %v, expected: %v", cols, expectedCols)
		}
		if cols[0] != expectedCols[0] || cols[1] != expectedCols[1] {
			t.Fatalf("incorrect columns: %v", cols)
		}
		return nil
	})

	// rows contains the data; verify that it's the right data.
	vals := make([]interface{}, len(expectedCols))
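	// rows.Scan needs pointer destinations, so use *interface{} placeholders.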
	for i := range vals {
		vals[i] = new(interface{})
	}
	var count int64
	for ; rows.Next(); count++ {
		if err := rows.Scan(vals...); err != nil {
			t.Errorf("row %d scan failed: %s", count, err)
			continue
		}
		for j, v := range vals {
			if j == 0 {
				if val := *v.(*interface{}); val != nil {
					switch k := val.(type) {
					case int64:
						if count != k {
							t.Errorf("k = %d, expected %d", k, count)
						}

					default:
						t.Errorf("error input of type %T", k)
					}
				} else {
					t.Error("received nil value for column 'k'")
				}
			} else {
				if val := *v.(*interface{}); val != nil {
					t.Error("received non NULL value for column 'b'")
				}
			}
		}
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if eCount := int64(maxValue + 1); eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}

	// Check that the index on b eventually goes live even though a schema
	// change in front of it in the queue got purged.
	rows, err := sqlDB.Query(`SELECT * from t.test@test_b_key`)
	if err != nil {
		t.Fatal(err)
	}
	count = 0
	for ; rows.Next(); count++ {
	}
	if err := rows.Err(); err != nil {
		t.Fatal(err)
	}
	if eCount := int64(maxValue + 1); eCount != count {
		t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count)
	}

	// Check that the index on c gets purged.
	if _, err = sqlDB.Query(`SELECT * from t.test@foo`); err == nil {
		t.Fatal("SELECT over index 'foo' works")
	}

	// Check that the number of k-v pairs is accurate.
	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	tableEnd := tablePrefix.PrefixEnd()
	if kvs, err := kvDB.Scan(tablePrefix, tableEnd, 0); err != nil {
		t.Fatal(err)
	} else if e := 2 * (maxValue + 1); len(kvs) != e {
		t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs))
	}
}
// TestTableMutationQueue tests that schema elements, when added, are
// assigned the correct start state and mutation ID.
func TestTableMutationQueue(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Disable synchronous and asynchronous schema change processing so that
	// the mutations get queued up.
	params, _ := createTestServerParams()
	params.Knobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			SyncSchemaChangersFilter: func(tscc csql.TestingSchemaChangerCollection) {
				tscc.ClearSchemaChangers()
			},
		},
		SQLSchemaChangeManager: &csql.SchemaChangeManagerTestingKnobs{
			AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
		},
	}
	server, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer server.Stopper().Stop()

	// Create a simple k-v table.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	// Run some schema changes.

	// This single command creates three columns and two indexes sharing the
	// same mutation ID.
	if _, err := sqlDB.Exec(
		`ALTER TABLE t.test ADD d INT UNIQUE, ADD e INT UNIQUE, ADD f INT`,
	); err != nil {
		t.Fatal(err)
	}

	// This command creates two mutations sharing the same mutation ID.
	if _, err := sqlDB.Exec(
		`ALTER TABLE t.test ADD g INT, ADD CONSTRAINT idx_f UNIQUE (f)`,
	); err != nil {
		t.Fatal(err)
	}

	// This command creates a single mutation.
	if _, err := sqlDB.Exec(`CREATE UNIQUE INDEX idx_g ON t.test (g)`); err != nil {
		t.Fatal(err)
	}

	// This command creates a drop mutation.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test DROP v`); err != nil {
		t.Fatal(err)
	}

	// read table descriptor
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	expected := []struct {
		name  string
		id    sqlbase.MutationID
		state sqlbase.DescriptorMutation_State
	}{
		{"d", 1, sqlbase.DescriptorMutation_DELETE_ONLY},
		{"test_d_key", 1, sqlbase.DescriptorMutation_DELETE_ONLY},
		{"e", 1, sqlbase.DescriptorMutation_DELETE_ONLY},
		{"test_e_key", 1, sqlbase.DescriptorMutation_DELETE_ONLY},
		{"f", 1, sqlbase.DescriptorMutation_DELETE_ONLY},
		// Second schema change.
		{"g", 2, sqlbase.DescriptorMutation_DELETE_ONLY},
		{"idx_f", 2, sqlbase.DescriptorMutation_DELETE_ONLY},
		// Third.
		{"idx_g", 3, sqlbase.DescriptorMutation_DELETE_ONLY},
		// Drop mutations start off in the WRITE_ONLY state.
		{"v", 4, sqlbase.DescriptorMutation_WRITE_ONLY},
	}

	if len(tableDesc.Mutations) != len(expected) {
		t.Fatalf("%d mutations, instead of expected %d", len(tableDesc.Mutations), len(expected))
	}

	for i, m := range tableDesc.Mutations {
		name := expected[i].name
		if col := m.GetColumn(); col != nil {
			if col.Name != name {
				t.Errorf("%d entry: name %s, expected %s", i, col.Name, name)
			}
		}
		if idx := m.GetIndex(); idx != nil {
			if idx.Name != name {
				t.Errorf("%d entry: name %s, expected %s", i, idx.Name, name)
			}
		}
		if id := expected[i].id; m.MutationID != id {
			t.Errorf("%d entry: id %d, expected %d", i, m.MutationID, id)
		}
		if state := expected[i].state; m.State != state {
			t.Errorf("%d entry: state %s, expected %s", i, m.State, state)
		}
	}
}
Example n. 24
// Test that there's no deadlock between AcquireByName and Release.
// We used to have one due to lock inversion between the tableNameCache lock and
// the leaseState lock, triggered when the same lease was Release()d after the
// table had been deleted (which means it's removed from the tableNameCache) and
// AcquireByName()d at the same time.
func TestReleaseAcquireByNameDeadlock(t *testing.T) {
	defer leaktest.AfterTest(t)()
	removalTracker := NewLeaseRemovalTracker()
	testingKnobs := base.TestingKnobs{
		SQLLeaseManager: &LeaseManagerTestingKnobs{
			LeaseStoreTestingKnobs: LeaseStoreTestingKnobs{
				LeaseReleasedEvent: removalTracker.LeaseRemovedNotification,
			},
		},
	}
	s, sqlDB, kvDB := serverutils.StartServer(
		t, base.TestServerArgs{Knobs: testingKnobs})
	defer s.Stopper().Stop()
	leaseManager := s.LeaseManager().(*LeaseManager)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Populate the name cache.
	var lease *LeaseState
	if err := kvDB.Txn(func(txn *client.Txn) error {
		var err error
		lease, err = leaseManager.AcquireByName(txn, tableDesc.ParentID, "test")
		return err
	}); err != nil {
		t.Fatal(err)
	}
	if err := leaseManager.Release(lease); err != nil {
		t.Fatal(err)
	}

	// Pretend the table has been deleted, so that when we release leases on it,
	// they are removed from the tableNameCache too.
	tableState := leaseManager.findTableState(tableDesc.ID, true)
	tableState.deleted = true

	// Try to trigger the race repeatedly: race an AcquireByName against a
	// Release.
	// leaseChan acts as a barrier, synchronizing the two goroutines at every
	// iteration.
	leaseChan := make(chan *LeaseState)
	errChan := make(chan error)
	go func() {
		for lease := range leaseChan {
			// Move errors to the main goroutine.
			errChan <- leaseManager.Release(lease)
		}
	}()

	for i := 0; i < 50; i++ {
		var leaseByName *LeaseState
		if err := kvDB.Txn(func(txn *client.Txn) error {
			var err error
			lease, err := leaseManager.AcquireByName(txn, tableDesc.ParentID, "test")
			if err != nil {
				t.Fatal(err)
			}
			// This test will need to wait until leases are removed from the store
			// before creating new leases because the jitter used in the leases'
			// expiration causes duplicate key errors when trying to create new
			// leases. This is not a problem in production, since leases are not
			// removed from the store until they expire, and the jitter is small
			// compared to their lifetime, but it is a problem in this test because
			// we churn through leases quickly.
			tracker := removalTracker.TrackRemoval(lease)
			// Start the race: signal the other goroutine to release, while we do another
			// acquire at the same time.
			leaseChan <- lease
			leaseByName, err = leaseManager.AcquireByName(txn, tableDesc.ParentID, "test")
			if err != nil {
				t.Fatal(err)
			}
			tracker2 := removalTracker.TrackRemoval(leaseByName)
			// See if there was an error releasing lease.
			err = <-errChan
			if err != nil {
				t.Fatal(err)
			}

			// Depending on how the race went, there are two cases - either the
			// AcquireByName ran first, and got the same lease as we already had,
			// or the Release ran first and so we got a new lease.
			if leaseByName == lease {
				if lease.Refcount() != 1 {
					t.Fatalf("expected refcount 1, got %d", lease.Refcount())
				}
				if err := leaseManager.Release(lease); err != nil {
					t.Fatal(err)
				}
				if err := tracker.WaitForRemoval(); err != nil {
					t.Fatal(err)
				}
			} else {
				if lease.Refcount() != 0 {
					t.Fatalf("expected refcount 0, got %d", lease.Refcount())
				}
				if err := leaseManager.Release(leaseByName); err != nil {
					t.Fatal(err)
				}
				if err := tracker2.WaitForRemoval(); err != nil {
					t.Fatal(err)
				}
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}
	close(leaseChan)
}
Example n. 25
func TestManualReplication(t *testing.T) {
	defer leaktest.AfterTest(t)()

	tc := StartTestCluster(t, 3,
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "t",
			},
		})
	defer tc.Stopper().Stop()

	s0 := sqlutils.MakeSQLRunner(t, tc.Conns[0])
	s1 := sqlutils.MakeSQLRunner(t, tc.Conns[1])
	s2 := sqlutils.MakeSQLRunner(t, tc.Conns[2])

	s0.Exec(`CREATE DATABASE t`)
	s0.Exec(`CREATE TABLE test (k INT PRIMARY KEY, v INT)`)
	s0.Exec(`INSERT INTO test VALUES (5, 1), (4, 2), (1, 2)`)

	if r := s1.Query(`SELECT * FROM test WHERE k = 5`); !r.Next() {
		t.Fatal("no rows")
	}

	s2.ExecRowsAffected(3, `DELETE FROM test`)

	// Split the table to a new range.
	kvDB := tc.Servers[0].DB()
	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	tableStartKey := keys.MakeRowSentinelKey(keys.MakeTablePrefix(uint32(tableDesc.ID)))
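	// Splitting at the table's first possible row key gives the table's data
	// its own range.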
	leftRangeDesc, tableRangeDesc, err := tc.SplitRange(tableStartKey)
	if err != nil {
		t.Fatal(err)
	}
	log.Infof(context.Background(), "After split got ranges: %+v and %+v.", leftRangeDesc, tableRangeDesc)
	if len(tableRangeDesc.Replicas) == 0 {
		t.Fatalf(
			"expected replica on node 1, got no replicas: %+v", tableRangeDesc.Replicas)
	}
	if tableRangeDesc.Replicas[0].NodeID != 1 {
		t.Fatalf(
			"expected replica on node 1, got replicas: %+v", tableRangeDesc.Replicas)
	}

	// Replicate the table's range to all the nodes.
	tableRangeDesc, err = tc.AddReplicas(
		tableRangeDesc.StartKey.AsRawKey(), tc.Target(1), tc.Target(2),
	)
	if err != nil {
		t.Fatal(err)
	}
	if len(tableRangeDesc.Replicas) != 3 {
		t.Fatalf("expected 3 replicas, got %+v", tableRangeDesc.Replicas)
	}
	for i := 0; i < 3; i++ {
		if _, ok := tableRangeDesc.GetReplicaDescriptor(
			tc.Servers[i].GetFirstStoreID()); !ok {
			t.Fatalf("expected replica on store %d, got %+v",
				tc.Servers[i].GetFirstStoreID(), tableRangeDesc.Replicas)
		}
	}

	// Transfer the lease to node 1.
	leaseHolder, err := tc.FindRangeLeaseHolder(
		tableRangeDesc,
		&ReplicationTarget{
			NodeID:  tc.Servers[0].GetNode().Descriptor.NodeID,
			StoreID: tc.Servers[0].GetFirstStoreID(),
		})
	if err != nil {
		t.Fatal(err)
	}
	if leaseHolder.StoreID != tc.Servers[0].GetFirstStoreID() {
		t.Fatalf("expected initial lease on server idx 0, but is on node: %+v",
			leaseHolder)
	}

	err = tc.TransferRangeLease(tableRangeDesc, tc.Target(1))
	if err != nil {
		t.Fatal(err)
	}

	// Check that the lease holder has changed. We'll use a bogus hint, which
	// shouldn't matter (other than ensuring that it's not this call that moves
	// the lease, but that a holder already existed).
	leaseHolder, err = tc.FindRangeLeaseHolder(
		tableRangeDesc,
		&ReplicationTarget{
			NodeID:  tc.Servers[0].GetNode().Descriptor.NodeID,
			StoreID: tc.Servers[0].GetFirstStoreID(),
		})
	if err != nil {
		t.Fatal(err)
	}
	if leaseHolder.StoreID != tc.Servers[1].GetFirstStoreID() {
		t.Fatalf("expected lease on server idx 1 (node: %d store: %d), but is on node: %+v",
			tc.Servers[1].GetNode().Descriptor.NodeID,
			tc.Servers[1].GetFirstStoreID(),
			leaseHolder)
	}
}
Example n. 26
func TestClusterFlow(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numRows = 100

	args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual}
	tc := serverutils.StartTestCluster(t, 3, args)
	defer tc.Stopper().Stop()

	sumDigitsFn := func(row int) parser.Datum {
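		// Sum of the decimal digits of row; e.g. 99 -> 18.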
		sum := 0
		for row > 0 {
			sum += row % 10
			row /= 10
		}
		return parser.NewDInt(parser.DInt(sum))
	}

	sqlutils.CreateTable(t, tc.ServerConn(0), "t",
		"num INT PRIMARY KEY, digitsum INT, numstr STRING, INDEX s (digitsum)",
		numRows,
		sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn))

	kvDB := tc.Server(0).KVClient().(*client.DB)
	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	makeIndexSpan := func(start, end int) TableReaderSpan {
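		// Span over the 's' index covering digitsum values in [start, end).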
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	// Set up table readers on three hosts feeding data into a join reader on
	// the third host. This is a basic test for the distributed flow
	// infrastructure, including local and remote streams.
	//
	// Note that the ranges won't necessarily be local to the table readers, but
	// that doesn't matter for the purposes of this test.

	tr1 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(0, 8)},
	}

	tr2 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(8, 12)},
	}

	tr3 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(12, 100)},
	}

	jr := JoinReaderSpec{
		Table:         *desc,
		OutputColumns: []uint32{2},
	}

	txn := client.NewTxn(context.Background(), *kvDB)
	fid := FlowID{uuid.MakeV4()}
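	// All three flow requests share this FlowID so that the mailbox streams
	// set up below are matched across nodes.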

	req1 := &SetupFlowRequest{Txn: txn.Proto}
	req1.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr1},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{Mailbox: &MailboxSpec{StreamID: 0, TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req2 := &SetupFlowRequest{Txn: txn.Proto}
	req2.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr2},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{Mailbox: &MailboxSpec{StreamID: 1, TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req3 := &SetupFlowRequest{Txn: txn.Proto}
	req3.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{
			{
				Core: ProcessorCoreUnion{TableReader: &tr3},
				Output: []OutputRouterSpec{{
					Type: OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{
						{LocalStreamID: LocalStreamID(0)},
					},
				}},
			},
			{
				Input: []InputSyncSpec{{
					Type:     InputSyncSpec_ORDERED,
					Ordering: Ordering{Columns: []Ordering_Column{{1, Ordering_Column_ASC}}},
					Streams: []StreamEndpointSpec{
						{Mailbox: &MailboxSpec{StreamID: 0}},
						{Mailbox: &MailboxSpec{StreamID: 1}},
						{LocalStreamID: LocalStreamID(0)},
					},
				}},
				Core: ProcessorCoreUnion{JoinReader: &jr},
				Output: []OutputRouterSpec{{
					Type:    OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{{Mailbox: &MailboxSpec{SimpleResponse: true}}},
				}}},
		},
	}

	var clients []DistSQLClient
	for i := 0; i < 3; i++ {
		s := tc.Server(i)
		conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
		if err != nil {
			t.Fatal(err)
		}
		clients = append(clients, NewDistSQLClient(conn))
	}

	ctx := context.Background()

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 0")
	}
	if resp, err := clients[0].SetupFlow(context.Background(), req1); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 1")
	}
	if resp, err := clients[1].SetupFlow(context.Background(), req2); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Running flow on 2")
	}
	stream, err := clients[2].RunSimpleFlow(context.Background(), req3)
	if err != nil {
		t.Fatal(err)
	}

	var decoder StreamDecoder
	var rows sqlbase.EncDatumRows
	for {
		msg, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		err = decoder.AddMessage(msg)
		if err != nil {
			t.Fatal(err)
		}
		rows = testGetDecodedRows(t, &decoder, rows)
	}
	if done, trailerErr := decoder.IsDone(); !done {
		t.Fatal("stream not done")
	} else if trailerErr != nil {
		t.Fatal("error in the stream trailer:", trailerErr)
	}
	// The result should be all the numbers in string form, ordered by the
	// digit sum (and then by number).
	var results []string
	for sum := 1; sum <= 50; sum++ {
		for i := 1; i <= numRows; i++ {
			if int(*sumDigitsFn(i).(*parser.DInt)) == sum {
				results = append(results, fmt.Sprintf("['%s']", sqlutils.IntToEnglish(i)))
			}
		}
	}
	expected := strings.Join(results, " ")
	expected = "[" + expected + "]"
	if rowStr := rows.String(); rowStr != expected {
		t.Errorf("Result: %s\n Expected: %s\n", rowStr, expected)
	}
}
Example n. 27
// Test that changing a descriptor's name updates the name cache.
func TestNameCacheIsUpdated(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, kvDB, cleanup := sqlutils.SetupServer(t)
	defer cleanup()
	leaseManager := s.LeaseManager().(*LeaseManager)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE DATABASE t1;
CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR);
`); err != nil {
		t.Fatal(err)
	}

	// Populate the name cache.
	if _, err := sqlDB.Exec("SELECT * FROM t.test;"); err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")

	// Rename.
	if _, err := sqlDB.Exec("ALTER TABLE t.test RENAME TO t.test2;"); err != nil {
		t.Fatal(err)
	}

	// Check that the cache has been updated.
	if leaseManager.tableNames.get(tableDesc.ParentID, "test", s.Clock()) != nil {
		t.Fatalf("old name still in cache")
	}

	lease := leaseManager.tableNames.get(tableDesc.ParentID, "test2", s.Clock())
	if lease == nil {
		t.Fatalf("new name not found in cache")
	}
	if lease.ID != tableDesc.ID {
		t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.ID, tableDesc.ID)
	}
	if err := leaseManager.Release(lease); err != nil {
		t.Fatal(err)
	}

	// Rename to a different database.
	if _, err := sqlDB.Exec("ALTER TABLE t.test2 RENAME TO t1.test2;"); err != nil {
		t.Fatal(err)
	}

	// Re-read the descriptor, to get the new ParentID.
	newTableDesc := sqlbase.GetTableDescriptor(kvDB, "t1", "test2")
	if tableDesc.ParentID == newTableDesc.ParentID {
		t.Fatalf("database didn't change")
	}

	// Check that the cache has been updated.
	if leaseManager.tableNames.get(tableDesc.ParentID, "test2", s.Clock()) != nil {
		t.Fatalf("old name still in cache")
	}

	lease = leaseManager.tableNames.get(newTableDesc.ParentID, "test2", s.Clock())
	if lease == nil {
		t.Fatalf("new name not found in cache")
	}
	if lease.ID != tableDesc.ID {
		t.Fatalf("new name has wrong ID: %d (expected: %d)", lease.ID, tableDesc.ID)
	}
	if err := leaseManager.Release(lease); err != nil {
		t.Fatal(err)
	}
}
Example n. 28
// Test that a SQL txn that resolved a name can keep resolving that name during
// its lifetime even after the table has been renamed.
// Also tests that the name of a renamed table cannot be reused until everybody
// has stopped using it. Otherwise, we'd have different transactions in the
// system using a single name for different tables.
// Also tests that the old name cannot be used by a node that doesn't have a lease
// on the old version even while the name mapping still exists.
func TestTxnCanStillResolveOldName(t *testing.T) {
	defer leaktest.AfterTest(t)()

	var lmKnobs LeaseManagerTestingKnobs
	// renameUnblocked is used to block the rename schema change until the test
	// no longer needs the old name->id mapping to exist.
	renameUnblocked := make(chan interface{})
	serverParams := base.TestServerArgs{
		Knobs: base.TestingKnobs{
			SQLExecutor: &ExecutorTestingKnobs{
				SyncSchemaChangersRenameOldNameNotInUseNotification: func() {
					<-renameUnblocked
				},
			},
			SQLLeaseManager: &lmKnobs,
		}}
	var mu sync.Mutex
	var waitTableID sqlbase.ID
	// renamed is used to block until the node cannot get leases with the original
	// table name. It will be signaled once the table has been renamed and the update
	// about the new name has been processed. Moreover, not only does an update to
	// the name need to have been received, but the version of the descriptor must
	// also have been incremented in order to guarantee that the node cannot get
	// leases using the old name (an update with the new name but the original
	// version is ignored by the leasing refresh mechanism).
	renamed := make(chan interface{})
	lmKnobs.TestingLeasesRefreshedEvent =
		func(cfg config.SystemConfig) {
			mu.Lock()
			defer mu.Unlock()
			if waitTableID != 0 {
				if isRenamed(waitTableID, "t2", 2, cfg) {
					close(renamed)
					waitTableID = 0
				}
			}
		}
	s, db, kvDB := serverutils.StartServer(t, serverParams)
	defer s.Stopper().Stop()

	sql := `
CREATE DATABASE test;
CREATE TABLE test.t (a INT PRIMARY KEY);
`
	_, err := db.Exec(sql)
	if err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	mu.Lock()
	waitTableID = tableDesc.ID
	mu.Unlock()

	txn, err := db.Begin()
	if err != nil {
		t.Fatal(err)
	}

	// Run a command to make the transaction resolve the table name.
	if _, err := txn.Exec("SELECT * FROM test.t"); err != nil {
		t.Fatal(err)
	}

	// Concurrently, rename the table.
	threadDone := make(chan interface{})
	go func() {
		// The ALTER will commit and signal the main thread through `renamed`, but
		// the schema changer will remain blocked by the lease on the "t" version
		// held by the txn started above.
		if _, err := db.Exec("ALTER TABLE test.t RENAME TO test.t2"); err != nil {
			panic(err)
		}
		close(threadDone)
	}()

	// Block until the LeaseManager has processed the gossip update.
	<-renamed

	// Run another command in the transaction and make sure that we can still
	// resolve the table name.
	if _, err := txn.Exec("SELECT * FROM test.t"); err != nil {
		t.Fatal(err)
	}

	// Check that the name cannot be reused while somebody still has a lease on
	// the old one (the mechanism for ensuring this is that the entry for the old
	// name is not deleted from the database until the async schema changer checks
	// that there's no more leases on the old version).
	if _, err := db.Exec("CREATE TABLE test.t (a INT PRIMARY KEY)"); !testutils.IsError(
		err, `table "t" already exists`) {
		t.Fatal(err)
	}

	if err := txn.Commit(); err != nil {
		t.Fatal(err)
	}

	// Check that the old name is not usable outside of the transaction now
	// that the node doesn't have a lease on it anymore (committing the txn
	// should have released the lease on the version of the descriptor with the
	// old name), even though the name mapping still exists.
	lease := s.LeaseManager().(*LeaseManager).tableNames.get(tableDesc.ID, "t", s.Clock())
	if lease != nil {
		t.Fatalf(`still have lease on "t"`)
	}
	if _, err := db.Exec("SELECT * FROM test.t"); !testutils.IsError(
		err, `table "test.t" does not exist`) {
		t.Fatal(err)
	}
	close(renameUnblocked)

	// Block until the thread doing the rename has finished, so the test can clean
	// up. It needed to wait for the transaction to release its lease.
	<-threadDone
}
Example n. 29
func TestServer(t *testing.T) {
	defer leaktest.AfterTest(t)()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
	if err != nil {
		t.Fatal(err)
	}

	if _, err := sqlDB.Exec(`
		CREATE DATABASE test;
		CREATE TABLE test.t (a INT PRIMARY KEY, b INT);
		INSERT INTO test.t VALUES (1, 10), (2, 20), (3, 30);
	`); err != nil {
		t.Fatal(err)
	}

	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	ts := TableReaderSpec{
		Table:         *td,
		IndexIdx:      0,
		Reverse:       false,
		Spans:         nil,
		Filter:        Expression{Expr: "$0 != 2"}, // a != 2
		OutputColumns: []uint32{0, 1},              // a, b
	}

	txn := client.NewTxn(context.Background(), *kvDB)

	req := &SetupFlowsRequest{Txn: txn.Proto}
	req.Flows = []FlowSpec{{
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &ts},
			Output: []OutputRouterSpec{{
				Type:    OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{{Mailbox: &MailboxSpec{SimpleResponse: true}}},
			}},
		}},
	}}

	distSQLClient := NewDistSQLClient(conn)
	stream, err := distSQLClient.RunSimpleFlow(context.Background(), req)
	if err != nil {
		t.Fatal(err)
	}
	var decoder StreamDecoder
	var rows sqlbase.EncDatumRows
	for {
		msg, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		err = decoder.AddMessage(msg)
		if err != nil {
			t.Fatal(err)
		}
		rows = testGetDecodedRows(t, &decoder, rows)
	}
	if done, trailerErr := decoder.IsDone(); !done {
		t.Fatal("stream not done")
	} else if trailerErr != nil {
		t.Fatal("error in the stream trailer:", trailerErr)
	}
	str := rows.String()
	expected := "[[1 10] [3 30]]"
	if str != expected {
		t.Errorf("invalid results: %s, expected %s'", str, expected)
	}
}
Example n. 30
// Test that once a table is marked as deleted, a lease's refcount dropping to 0
// means the lease is released immediately, as opposed to being released only
// when it expires.
func TestLeasesOnDeletedTableAreReleasedImmediately(t *testing.T) {
	defer leaktest.AfterTest(t)()

	var mu sync.Mutex
	clearSchemaChangers := false

	var waitTableID sqlbase.ID
	deleted := make(chan bool)

	ctx, _ := createTestServerContext()
	ctx.TestingKnobs = base.TestingKnobs{
		SQLExecutor: &csql.ExecutorTestingKnobs{
			SyncSchemaChangersFilter: func(tscc csql.TestingSchemaChangerCollection) {
				mu.Lock()
				defer mu.Unlock()
				if clearSchemaChangers {
					tscc.ClearSchemaChangers()
				}
			},
		},
		SQLLeaseManager: &csql.LeaseManagerTestingKnobs{
			TestingLeasesRefreshedEvent: func(cfg config.SystemConfig) {
				mu.Lock()
				defer mu.Unlock()
				if waitTableID != 0 {
					if isDeleted(waitTableID, cfg) {
						close(deleted)
						waitTableID = 0
					}
				}
			},
		},
		SQLSchemaChangeManager: &csql.SchemaChangeManagerTestingKnobs{
			AsyncSchemaChangerExecNotification: schemaChangeManagerDisabled,
		},
	}
	s, db, kvDB := setupWithContext(t, &ctx)
	defer cleanup(s, db)

	sql := `
CREATE DATABASE test;
CREATE TABLE test.t(a INT PRIMARY KEY);
`
	_, err := db.Exec(sql)
	if err != nil {
		t.Fatal(err)
	}

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	lease1, err := acquire(s.TestServer, tableDesc.ID, 0)
	if err != nil {
		t.Fatal(err)
	}
	lease2, err := acquire(s.TestServer, tableDesc.ID, 0)
	if err != nil {
		t.Fatal(err)
	}

	// Block schema changers so that the table we're about to DROP is not actually
	// dropped; it will be left in a "deleted" state.
	// Also install a way to wait for the config update to be processed.
	mu.Lock()
	clearSchemaChangers = true
	waitTableID = tableDesc.ID
	mu.Unlock()

	// DROP the table
	_, err = db.Exec(`DROP TABLE test.t`)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the LeaseManager has processed the gossip update.
	<-deleted

	// We should still be able to acquire, because we have an active lease.
	lease3, err := acquire(s.TestServer, tableDesc.ID, 0)
	if err != nil {
		t.Fatal(err)
	}

	// Release everything.
	if err := s.LeaseManager().(*csql.LeaseManager).Release(lease1); err != nil {
		t.Fatal(err)
	}
	if err := s.LeaseManager().(*csql.LeaseManager).Release(lease2); err != nil {
		t.Fatal(err)
	}
	if err := s.LeaseManager().(*csql.LeaseManager).Release(lease3); err != nil {
		t.Fatal(err)
	}
	// Now we shouldn't be able to acquire any more.
	_, err = acquire(s.TestServer, tableDesc.ID, 0)
	if !testutils.IsError(err, "table is being deleted") {
		t.Fatalf("got a different error than expected: %s", err)
	}
}