func TestZoneConfigValidate(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testCases := []struct {
		cfg      config.ZoneConfig
		expected string
	}{
		{
			config.ZoneConfig{},
			"attributes for at least one replica must be specified in zone config",
		},
		{
			config.ZoneConfig{
				NumReplicas: 2,
			},
			"at least 3 replicas are required for multi-replica configurations",
		},
		{
			config.ZoneConfig{
				NumReplicas: 1,
			},
			"RangeMaxBytes 0 less than minimum allowed",
		},
		{
			config.ZoneConfig{
				NumReplicas:   1,
				RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes,
			},
			"",
		},
		{
			config.ZoneConfig{
				NumReplicas:   1,
				RangeMinBytes: config.DefaultZoneConfig().RangeMaxBytes,
				RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes,
			},
			"is greater than or equal to RangeMaxBytes",
		},
	}
	for i, c := range testCases {
		err := c.cfg.Validate()
		if c.expected == "" {
			if err != nil {
				t.Fatalf("%d: expected success, but got %v", i, err)
			}
		} else if !testutils.IsError(err, c.expected) {
			t.Fatalf("%d: expected %s, but got %v", i, c.expected, err)
		}
	}
}
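// The validation rules exercised by the test above can be read off the
// expected error strings. The following sketch is illustrative only and is
// NOT the actual config.ZoneConfig.Validate implementation; in particular,
// minRangeMaxBytes is a hypothetical stand-in for the real lower bound.
func validateZoneConfigSketch(cfg config.ZoneConfig) error {
	const minRangeMaxBytes = 64 << 10 // hypothetical minimum, for illustration

	// An empty config specifies no replicas at all.
	if cfg.NumReplicas == 0 {
		return errors.New("attributes for at least one replica must be specified in zone config")
	}
	// Two replicas cannot form a quorum-safe multi-replica configuration.
	if cfg.NumReplicas == 2 {
		return errors.New("at least 3 replicas are required for multi-replica configurations")
	}
	// The maximum range size must clear some minimum threshold.
	if cfg.RangeMaxBytes < minRangeMaxBytes {
		return errors.Errorf("RangeMaxBytes %d less than minimum allowed %d",
			cfg.RangeMaxBytes, minRangeMaxBytes)
	}
	// The split-trigger size must be strictly larger than the merge/minimum size.
	if cfg.RangeMinBytes >= cfg.RangeMaxBytes {
		return errors.Errorf("RangeMinBytes %d is greater than or equal to RangeMaxBytes %d",
			cfg.RangeMinBytes, cfg.RangeMaxBytes)
	}
	return nil
}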
// newRange returns a new range with the given rangeID.
func newRange(rangeID roachpb.RangeID, allocator storage.Allocator) *Range {
	return &Range{
		desc: roachpb.RangeDescriptor{
			RangeID: rangeID,
		},
		zone:      config.DefaultZoneConfig(),
		replicas:  make(map[roachpb.StoreID]replica),
		allocator: allocator,
	}
}
func (t *parallelTest) setup(spec *parTestSpec) {
	if spec.ClusterSize == 0 {
		spec.ClusterSize = 1
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Cluster Size: %d", spec.ClusterSize)
	}

	args := base.TestClusterArgs{
		ServerArgs: base.TestServerArgs{
			Knobs: base.TestingKnobs{
				SQLExecutor: &sql.ExecutorTestingKnobs{
					WaitForGossipUpdate:   true,
					CheckStmtStringChange: true,
				},
			},
		},
	}
	t.cluster = serverutils.StartTestCluster(t, spec.ClusterSize, args)
	t.clients = make([][]*gosql.DB, spec.ClusterSize)
	for i := range t.clients {
		t.clients[i] = append(t.clients[i], t.cluster.ServerConn(i))
	}
	r0 := sqlutils.MakeSQLRunner(t, t.clients[0][0])

	if spec.RangeSplitSize != 0 {
		if testing.Verbose() || log.V(1) {
			log.Infof(t.ctx, "Setting range split size: %d", spec.RangeSplitSize)
		}
		zoneCfg := config.DefaultZoneConfig()
		zoneCfg.RangeMaxBytes = int64(spec.RangeSplitSize)
		zoneCfg.RangeMinBytes = zoneCfg.RangeMaxBytes / 2
		buf, err := protoutil.Marshal(&zoneCfg)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		r0.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf)
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Creating database")
	}

	r0.Exec("CREATE DATABASE test")
	for i := range t.clients {
		sqlutils.MakeSQLRunner(t, t.clients[i][0]).Exec("SET DATABASE = test")
	}

	if testing.Verbose() || log.V(1) {
		log.Infof(t.ctx, "Test setup done")
	}
}
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Override default zone config.
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = 1 << 18
	defer config.TestingSetDefaultZoneConfig(cfg)()

	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     10 * time.Millisecond,
		Multiplier:     2,
	}
	s, _ := createTestDBWithContext(t, dbCtx)
	// This is purely to silence log spam.
	config.TestingSetupZoneConfigHook(s.Stopper)
	defer s.Stop()

	// Start a test writer that writes about 32K per key so that not too many
	// writes are necessary to split the range.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we split 5 times in the allotted time.
	testutils.SucceedsSoon(t, func() error {
		// Scan the txn records.
		rows, err := s.DB.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return errors.Errorf("failed to scan meta2 keys: %s", err)
		}
		if lr := len(rows); lr < 5 {
			return errors.Errorf("expected >= 5 scans; got %d", lr)
		}
		return nil
	})
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using a SucceedsSoon construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	testutils.SucceedsSoon(t, func() error {
		if _, _, _, err := engine.MVCCScan(
			context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax,
			math.MaxInt64, hlc.MaxTimestamp, true, nil,
		); err != nil {
			return errors.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
// createDefaultZoneConfig creates the key/value pair for the default zone
// config entry.
func createDefaultZoneConfig() []roachpb.KeyValue {
	var ret []roachpb.KeyValue
	value := roachpb.Value{}
	desc := config.DefaultZoneConfig()
	if err := value.SetProto(&desc); err != nil {
		log.Fatalf(context.TODO(), "could not marshal %v", desc)
	}
	ret = append(ret, roachpb.KeyValue{
		Key:   MakeZoneKey(keys.RootNamespaceID),
		Value: value,
	})
	return ret
}
func TestSkipLargeReplicaSnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)()
	storeCfg := TestStoreConfig(nil)
	storeCfg.TestingKnobs.DisableSplitQueue = true

	const snapSize = 5 * (keySize + valSize)
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = snapSize
	defer config.TestingSetDefaultZoneConfig(cfg)()

	stopper := stop.NewStopper()
	defer stopper.Stop()
	store := createTestStoreWithConfig(t, stopper, &storeCfg)

	rep, err := store.GetReplica(rangeID)
	if err != nil {
		t.Fatal(err)
	}
	rep.SetMaxBytes(snapSize)

	if pErr := rep.redirectOnOrAcquireLease(context.Background()); pErr != nil {
		t.Fatal(pErr)
	}

	if err := fillTestRange(rep, snapSize); err != nil {
		t.Fatal(err)
	}

	if _, err := rep.GetSnapshot(context.Background(), "test"); err != nil {
		t.Fatal(err)
	}
	rep.CloseOutSnap()

	if err := fillTestRange(rep, snapSize*2); err != nil {
		t.Fatal(err)
	}

	rep.mu.Lock()
	_, err = rep.Snapshot()
	rep.mu.Unlock()
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		rep.mu.Lock()
		after := rep.mu.state.Stats.Total()
		rep.mu.Unlock()
		t.Fatalf(
			"snapshot of a very large range (%d / %d, needsSplit: %v, exceeds snap limit: %v) should fail but got %v",
			after, rep.GetMaxBytes(), rep.needsSplitBySize(), rep.exceedsDoubleSplitSizeLocked(), err,
		)
	}
}
// Start starts the TestServer by bootstrapping an in-memory store
// (defaults to a maximum of 100M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.ServingAddr() after Start() for client connections.
// Use TestServer.Stopper().Stop() to shut down the server after the test
// completes.
func (ts *TestServer) Start(params base.TestServerArgs) error {
	if ts.Cfg == nil {
		panic("Cfg not set")
	}

	if params.Stopper == nil {
		params.Stopper = stop.NewStopper()
	}

	if !params.PartOfCluster {
		// Change the replication requirements so we don't get log spam about
		// ranges not being replicated enough.
		cfg := config.DefaultZoneConfig()
		cfg.NumReplicas = 1
		fn := config.TestingSetDefaultZoneConfig(cfg)
		params.Stopper.AddCloser(stop.CloserFn(fn))
	}

	// Needs to be called before NewServer to ensure resolvers are initialized.
	if err := ts.Cfg.InitNode(); err != nil {
		return err
	}

	var err error
	ts.Server, err = NewServer(*ts.Cfg, params.Stopper)
	if err != nil {
		return err
	}

	// Our context must be shared with our server.
	ts.Cfg = &ts.Server.cfg

	if err := ts.Server.Start(context.Background()); err != nil {
		return err
	}

	// If enabled, wait for initial splits to complete before returning control.
	// If initial splits do not complete, the server is stopped before
	// returning.
	if stk, ok := ts.cfg.TestingKnobs.Store.(*storage.StoreTestingKnobs); ok && stk.DisableSplitQueue {
		return nil
	}
	if err := ts.WaitForInitialSplits(); err != nil {
		ts.Stop()
		return err
	}

	return nil
}
// UpdateZoneConfig updates the default zone config for the cluster.
func (c *Cluster) UpdateZoneConfig(rangeMinBytes, rangeMaxBytes int64) {
	zone := config.DefaultZoneConfig()
	zone.RangeMinBytes = rangeMinBytes
	zone.RangeMaxBytes = rangeMaxBytes

	buf, err := protoutil.Marshal(&zone)
	if err != nil {
		log.Fatal(context.Background(), err)
	}
	_, err = c.DB[0].Exec(`UPSERT INTO system.zones (id, config) VALUES (0, $1)`, buf)
	if err != nil {
		log.Fatal(context.Background(), err)
	}
}
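// Illustrative usage only (not taken from the source above): a test might
// shrink the default zone config so ranges split quickly under light load.
// The id 0 written by UpdateZoneConfig is keys.RootNamespaceID, i.e. the
// cluster-wide default zone. The specific byte sizes here are hypothetical.
func exampleShrinkRanges(c *Cluster) {
	c.UpdateZoneConfig(
		1<<20, // RangeMinBytes: 1 MiB (hypothetical value)
		2<<20, // RangeMaxBytes: 2 MiB (hypothetical value)
	)
}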
// TableDetails is an endpoint that returns columns, indices, and other
// relevant details for the specified table.
func (s *adminServer) TableDetails(
	ctx context.Context, req *serverpb.TableDetailsRequest,
) (*serverpb.TableDetailsResponse, error) {
	args := sql.SessionArgs{User: s.getUser(req)}
	session := s.NewSessionForRPC(ctx, args)
	defer session.Finish(s.server.sqlExecutor)

	escDBName := parser.Name(req.Database).String()
	if err := s.assertNotVirtualSchema(escDBName); err != nil {
		return nil, err
	}

	// TODO(cdo): Use real placeholders for the table and database names when
	// we've extended our SQL grammar to allow that.
	escTableName := parser.Name(req.Table).String()
	escQualTable := fmt.Sprintf("%s.%s", escDBName, escTableName)
	query := fmt.Sprintf(
		"SHOW COLUMNS FROM %s; SHOW INDEX FROM %s; SHOW GRANTS ON TABLE %s; SHOW CREATE TABLE %s;",
		escQualTable, escQualTable, escQualTable, escQualTable)
	r := s.server.sqlExecutor.ExecuteStatements(session, query, nil)
	defer r.Close()
	if err := s.firstNotFoundError(r.ResultList); err != nil {
		return nil, grpc.Errorf(codes.NotFound, "%s", err)
	}
	if err := s.checkQueryResults(r.ResultList, 4); err != nil {
		return nil, err
	}

	var resp serverpb.TableDetailsResponse

	// Marshal SHOW COLUMNS result.
	//
	// TODO(cdo): protobuf v3's default behavior for fields with zero values
	// (e.g. empty strings) is to suppress them. So, if protobuf field "foo" is
	// an empty string, "foo" won't show up in the marshalled JSON. I feel that
	// this is counterintuitive, and this should be fixed for our API.
	{
		const (
			fieldCol   = "Field" // column name
			typeCol    = "Type"
			nullCol    = "Null"
			defaultCol = "Default"
		)
		scanner := makeResultScanner(r.ResultList[0].Columns)
		for i, nRows := 0, r.ResultList[0].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[0].Rows.At(i)
			var col serverpb.TableDetailsResponse_Column
			if err := scanner.Scan(row, fieldCol, &col.Name); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, typeCol, &col.Type); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, nullCol, &col.Nullable); err != nil {
				return nil, err
			}
			isDefaultNull, err := scanner.IsNull(row, defaultCol)
			if err != nil {
				return nil, err
			}
			if !isDefaultNull {
				if err := scanner.Scan(row, defaultCol, &col.DefaultValue); err != nil {
					return nil, err
				}
			}
			resp.Columns = append(resp.Columns, col)
		}
	}

	// Marshal SHOW INDEX result.
	{
		const (
			nameCol      = "Name"
			uniqueCol    = "Unique"
			seqCol       = "Seq"
			columnCol    = "Column"
			directionCol = "Direction"
			storingCol   = "Storing"
			implicitCol  = "Implicit"
		)
		scanner := makeResultScanner(r.ResultList[1].Columns)
		for i, nRows := 0, r.ResultList[1].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[1].Rows.At(i)
			// Marshal index.
			var index serverpb.TableDetailsResponse_Index
			if err := scanner.Scan(row, nameCol, &index.Name); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, uniqueCol, &index.Unique); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, seqCol, &index.Seq); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, columnCol, &index.Column); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, directionCol, &index.Direction); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, storingCol, &index.Storing); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, implicitCol, &index.Implicit); err != nil {
				return nil, err
			}
			resp.Indexes = append(resp.Indexes, index)
		}
	}

	// Marshal SHOW GRANTS result.
	{
		const (
			userCol       = "User"
			privilegesCol = "Privileges"
		)
		scanner := makeResultScanner(r.ResultList[2].Columns)
		for i, nRows := 0, r.ResultList[2].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[2].Rows.At(i)
			// Marshal grant, splitting comma-separated privileges into a proper slice.
			var grant serverpb.TableDetailsResponse_Grant
			var privileges string
			if err := scanner.Scan(row, userCol, &grant.User); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, privilegesCol, &privileges); err != nil {
				return nil, err
			}
			grant.Privileges = strings.Split(privileges, ",")
			resp.Grants = append(resp.Grants, grant)
		}
	}

	// Marshal SHOW CREATE TABLE result.
	{
		const createTableCol = "CreateTable"
		showResult := r.ResultList[3]
		if showResult.Rows.Len() != 1 {
			return nil, s.serverErrorf("CreateTable response not available.")
		}
		scanner := makeResultScanner(showResult.Columns)
		var createStmt string
		if err := scanner.Scan(showResult.Rows.At(0), createTableCol, &createStmt); err != nil {
			return nil, err
		}
		resp.CreateTableStatement = createStmt
	}

	// Get the number of ranges in the table. We get the key span for the table
	// data. Then, we count the number of ranges that make up that key span.
	{
		iexecutor := sql.InternalExecutor{LeaseManager: s.server.leaseMgr}
		var tableSpan roachpb.Span
		if err := s.server.db.Txn(ctx, func(txn *client.Txn) error {
			var err error
			tableSpan, err = iexecutor.GetTableSpan(
				s.getUser(req), txn, req.Database, req.Table,
			)
			return err
		}); err != nil {
			return nil, s.serverError(err)
		}
		tableRSpan := roachpb.RSpan{}
		var err error
		tableRSpan.Key, err = keys.Addr(tableSpan.Key)
		if err != nil {
			return nil, s.serverError(err)
		}
		tableRSpan.EndKey, err = keys.Addr(tableSpan.EndKey)
		if err != nil {
			return nil, s.serverError(err)
		}
		rangeCount, err := s.server.distSender.CountRanges(ctx, tableRSpan)
		if err != nil {
			return nil, s.serverError(err)
		}
		resp.RangeCount = rangeCount
	}

	// Query the descriptor ID and zone configuration for this table.
	{
		path, err := s.queryDescriptorIDPath(session, []string{req.Database, req.Table})
		if err != nil {
			return nil, s.serverError(err)
		}
		resp.DescriptorID = int64(path[2])

		id, zone, zoneExists, err := s.queryZonePath(session, path)
		if err != nil {
			return nil, s.serverError(err)
		}

		if !zoneExists {
			zone = config.DefaultZoneConfig()
		}
		resp.ZoneConfig = zone

		switch id {
		case path[1]:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_DATABASE
		case path[2]:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_TABLE
		default:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_CLUSTER
		}
	}

	return &resp, nil
}
// DatabaseDetails is an endpoint that returns grants and a list of table names
// for the specified database.
func (s *adminServer) DatabaseDetails(
	ctx context.Context, req *serverpb.DatabaseDetailsRequest,
) (*serverpb.DatabaseDetailsResponse, error) {
	args := sql.SessionArgs{User: s.getUser(req)}
	session := s.NewSessionForRPC(ctx, args)
	defer session.Finish(s.server.sqlExecutor)

	escDBName := parser.Name(req.Database).String()
	if err := s.assertNotVirtualSchema(escDBName); err != nil {
		return nil, err
	}

	// Placeholders don't work with SHOW statements, so we need to manually
	// escape the database name.
	//
	// TODO(cdo): Use placeholders when they're supported by SHOW.
	query := fmt.Sprintf("SHOW GRANTS ON DATABASE %s; SHOW TABLES FROM %s;", escDBName, escDBName)
	r := s.server.sqlExecutor.ExecuteStatements(session, query, nil)
	defer r.Close()
	if err := s.firstNotFoundError(r.ResultList); err != nil {
		return nil, grpc.Errorf(codes.NotFound, "%s", err)
	}
	if err := s.checkQueryResults(r.ResultList, 2); err != nil {
		return nil, s.serverError(err)
	}

	// Marshal grants.
	var resp serverpb.DatabaseDetailsResponse
	{
		const (
			userCol       = "User"
			privilegesCol = "Privileges"
		)
		scanner := makeResultScanner(r.ResultList[0].Columns)
		for i, nRows := 0, r.ResultList[0].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[0].Rows.At(i)
			// Marshal grant, splitting comma-separated privileges into a proper slice.
			var grant serverpb.DatabaseDetailsResponse_Grant
			var privileges string
			if err := scanner.Scan(row, userCol, &grant.User); err != nil {
				return nil, err
			}
			if err := scanner.Scan(row, privilegesCol, &privileges); err != nil {
				return nil, err
			}
			grant.Privileges = strings.Split(privileges, ",")
			resp.Grants = append(resp.Grants, grant)
		}
	}

	// Marshal table names.
	{
		const tableCol = "Table"
		scanner := makeResultScanner(r.ResultList[1].Columns)
		if a, e := len(r.ResultList[1].Columns), 1; a != e {
			return nil, s.serverErrorf("show tables columns mismatch: %d != expected %d", a, e)
		}
		for i, nRows := 0, r.ResultList[1].Rows.Len(); i < nRows; i++ {
			row := r.ResultList[1].Rows.At(i)
			var tableName string
			if err := scanner.Scan(row, tableCol, &tableName); err != nil {
				return nil, err
			}
			resp.TableNames = append(resp.TableNames, tableName)
		}
	}

	// Query the descriptor ID and zone configuration for this database.
	{
		path, err := s.queryDescriptorIDPath(session, []string{req.Database})
		if err != nil {
			return nil, s.serverError(err)
		}
		resp.DescriptorID = int64(path[1])

		id, zone, zoneExists, err := s.queryZonePath(session, path)
		if err != nil {
			return nil, s.serverError(err)
		}

		if !zoneExists {
			zone = config.DefaultZoneConfig()
		}
		resp.ZoneConfig = zone

		switch id {
		case path[1]:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_DATABASE
		default:
			resp.ZoneConfigLevel = serverpb.ZoneConfigurationLevel_CLUSTER
		}
	}

	return &resp, nil
}
// TestAdminAPIZoneDetails verifies the zone configuration information returned
// for both DatabaseDetailsResponse AND TableDetailsResponse.
func TestAdminAPIZoneDetails(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)

	// Create database and table.
	ac := log.AmbientContext{Tracer: tracing.NewTracer()}
	ctx, span := ac.AnnotateCtxWithSpan(context.Background(), "test")
	defer span.Finish()
	session := sql.NewSession(
		ctx, sql.SessionArgs{User: security.RootUser}, ts.sqlExecutor, nil, &sql.MemoryMetrics{})
	session.StartUnlimitedMonitor()
	setupQueries := []string{
		"CREATE DATABASE test",
		"CREATE TABLE test.tbl (val STRING)",
	}
	for _, q := range setupQueries {
		res := ts.sqlExecutor.ExecuteStatements(session, q, nil)
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", q, res.ResultList[0].Err)
		}
	}

	// Function to verify the zone for table "test.tbl" as returned by the Admin
	// API.
	verifyTblZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.TableDetailsResponse
		if err := getAdminJSONProto(s, "databases/test/tables/tbl", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual table zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual table ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to verify the zone for database "test" as returned by the Admin
	// API.
	verifyDbZone := func(
		expectedZone config.ZoneConfig, expectedLevel serverpb.ZoneConfigurationLevel,
	) {
		var resp serverpb.DatabaseDetailsResponse
		if err := getAdminJSONProto(s, "databases/test", &resp); err != nil {
			t.Fatal(err)
		}
		if a, e := &resp.ZoneConfig, &expectedZone; !proto.Equal(a, e) {
			t.Errorf("actual db zone config %v did not match expected value %v", a, e)
		}
		if a, e := resp.ZoneConfigLevel, expectedLevel; a != e {
			t.Errorf("actual db ZoneConfigurationLevel %s did not match expected value %s", a, e)
		}
		if t.Failed() {
			t.FailNow()
		}
	}

	// Function to store a zone config for a given object ID.
	setZone := func(zoneCfg config.ZoneConfig, id sqlbase.ID) {
		zoneBytes, err := zoneCfg.Marshal()
		if err != nil {
			t.Fatal(err)
		}
		const query = `INSERT INTO system.zones VALUES($1, $2)`
		params := parser.NewPlaceholderInfo()
		params.SetValue(`1`, parser.NewDInt(parser.DInt(id)))
		params.SetValue(`2`, parser.NewDBytes(parser.DBytes(zoneBytes)))
		res := ts.sqlExecutor.ExecuteStatements(session, query, params)
		defer res.Close()
		if res.ResultList[0].Err != nil {
			t.Fatalf("error executing '%s': %s", query, res.ResultList[0].Err)
		}
	}

	// Verify zone matches cluster default.
	verifyDbZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)
	verifyTblZone(config.DefaultZoneConfig(), serverpb.ZoneConfigurationLevel_CLUSTER)

	// Get the ID path for the table. This will be an array of three IDs,
	// containing the ID of the root namespace, the database, and the table
	// (in that order).
	idPath, err := ts.admin.queryDescriptorIDPath(session, []string{"test", "tbl"})
	if err != nil {
		t.Fatal(err)
	}

	// Apply zone configuration to database and check again.
	dbZone := config.ZoneConfig{
		RangeMinBytes: 456,
	}
	setZone(dbZone, idPath[1])
	verifyDbZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)
	verifyTblZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)

	// Apply zone configuration to table and check again.
	tblZone := config.ZoneConfig{
		RangeMinBytes: 789,
	}
	setZone(tblZone, idPath[2])
	verifyDbZone(dbZone, serverpb.ZoneConfigurationLevel_DATABASE)
	verifyTblZone(tblZone, serverpb.ZoneConfigurationLevel_TABLE)
}
func TestDropDatabase(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	ctx := context.TODO()

	// Fix the column families so the key counts below don't change if the
	// family heuristics are updated.
	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR, FAMILY (k), FAMILY (v));
INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd');
`); err != nil {
		t.Fatal(err)
	}

	dbNameKey := sqlbase.MakeNameMetadataKey(keys.RootNamespaceID, "t")
	r, err := kvDB.Get(ctx, dbNameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !r.Exists() {
		t.Fatalf(`database "t" does not exist`)
	}
	dbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(r.ValueInt()))
	desc := &sqlbase.Descriptor{}
	if err := kvDB.GetProto(ctx, dbDescKey, desc); err != nil {
		t.Fatal(err)
	}
	dbDesc := desc.GetDatabase()

	tbNameKey := sqlbase.MakeNameMetadataKey(dbDesc.ID, "kv")
	gr, err := kvDB.Get(ctx, tbNameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !gr.Exists() {
		t.Fatalf(`table "kv" does not exist`)
	}
	tbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))
	if err := kvDB.GetProto(ctx, tbDescKey, desc); err != nil {
		t.Fatal(err)
	}
	tbDesc := desc.GetTable()

	// Add a zone config for both the table and database.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, dbDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	tbZoneKey := sqlbase.MakeZoneKey(tbDesc.ID)
	dbZoneKey := sqlbase.MakeZoneKey(dbDesc.ID)
	if gr, err := kvDB.Get(ctx, tbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("table zone config entry not found")
	}
	if gr, err := kvDB.Get(ctx, dbZoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("database zone config entry not found")
	}

	tablePrefix := keys.MakeTablePrefix(uint32(tbDesc.ID))
	tableStartKey := roachpb.Key(tablePrefix)
	tableEndKey := tableStartKey.PrefixEnd()
	if kvs, err := kvDB.Scan(ctx, tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 6; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}

	if _, err := sqlDB.Exec(`DROP DATABASE t`); err != nil {
		t.Fatal(err)
	}

	if kvs, err := kvDB.Scan(ctx, tableStartKey, tableEndKey, 0); err != nil {
		t.Fatal(err)
	} else if l := 0; len(kvs) != l {
		t.Fatalf("expected %d key value pairs, but got %d", l, len(kvs))
	}
	if gr, err := kvDB.Get(ctx, tbDescKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after database is dropped: %q", tbDescKey)
	}
	if gr, err := kvDB.Get(ctx, tbNameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor key still exists after database is dropped")
	}
	if gr, err := kvDB.Get(ctx, dbDescKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database descriptor still exists after database is dropped")
	}
	if gr, err := kvDB.Get(ctx, dbNameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database descriptor key still exists after database is dropped")
	}
	if gr, err := kvDB.Get(ctx, tbZoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table zone config entry still exists after the database is dropped")
	}
	if gr, err := kvDB.Get(ctx, dbZoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("database zone config entry still exists after the database is dropped")
	}
}
func TestDropTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()
	ctx := context.TODO()

	numRows := 2*sql.TableTruncateChunkSize + 1
	createKVTable(t, sqlDB, numRows)

	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv")
	nameKey := sqlbase.MakeNameMetadataKey(keys.MaxReservedDescID+1, "kv")
	gr, err := kvDB.Get(ctx, nameKey)
	if err != nil {
		t.Fatal(err)
	}
	if !gr.Exists() {
		t.Fatalf("Name entry %q does not exist", nameKey)
	}
	descKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt()))

	// Add a zone config for the table.
	cfg := config.DefaultZoneConfig()
	buf, err := protoutil.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, tableDesc.ID, buf); err != nil {
		t.Fatal(err)
	}

	zoneKey := sqlbase.MakeZoneKey(tableDesc.ID)
	if gr, err := kvDB.Get(ctx, zoneKey); err != nil {
		t.Fatal(err)
	} else if !gr.Exists() {
		t.Fatalf("zone config entry not found")
	}

	tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID)))
	checkKeyCount(t, kvDB, tablePrefix, 3*numRows)
	if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil {
		t.Fatal(err)
	}
	checkKeyCount(t, kvDB, tablePrefix, 0)

	// Test that the deleted table cannot be used. This prevents regressions
	// where name -> descriptor ID caches might erroneously make this statement
	// work.
	if _, err := sqlDB.Exec(`SELECT * FROM t.kv`); !testutils.IsError(err, `table "t.kv" does not exist`) {
		t.Fatalf("different error than expected: %v", err)
	}

	if gr, err := kvDB.Get(ctx, descKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table descriptor still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(ctx, nameKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("table name key still exists after the table is dropped")
	}

	if gr, err := kvDB.Get(ctx, zoneKey); err != nil {
		t.Fatal(err)
	} else if gr.Exists() {
		t.Fatalf("zone config entry still exists after the table is dropped")
	}
}
// TestGetZoneConfig exercises config.GetZoneConfig and the sql hook for it.
func TestGetZoneConfig(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := createTestServerParams()
	srv, sqlDB, _ := serverutils.StartServer(t, params)
	defer srv.Stopper().Stop()
	s := srv.(*server.TestServer)

	expectedCounter := uint32(keys.MaxReservedDescID)

	defaultZoneConfig := config.DefaultZoneConfig()
	defaultZoneConfig.RangeMinBytes = 1 << 20
	defaultZoneConfig.RangeMaxBytes = 1 << 20
	defaultZoneConfig.GC.TTLSeconds = 60
	{
		buf, err := protoutil.Marshal(&defaultZoneConfig)
		if err != nil {
			t.Fatal(err)
		}
		objID := keys.RootNamespaceID
		if _, err = sqlDB.Exec(`UPDATE system.zones SET config = $2 WHERE id = $1`, objID, buf); err != nil {
			t.Fatalf("problem writing zone %+v: %s", defaultZoneConfig, err)
		}
	}

	// Naming scheme for database and tables:
	//   db1 has tables tb11 and tb12
	//   db2 has tables tb21 and tb22

	expectedCounter++
	db1 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db1`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	db2 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE DATABASE db2`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb11 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb12 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db1.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb21 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb1 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	expectedCounter++
	tb22 := expectedCounter
	if _, err := sqlDB.Exec(`CREATE TABLE db2.tb2 (k INT PRIMARY KEY, v INT)`); err != nil {
		t.Fatal(err)
	}

	{
		cfg := forceNewConfig(t, s)

		// We have no custom zone configs.
		testCases := []struct {
			key     roachpb.RKey
			zoneCfg config.ZoneConfig
		}{
			{roachpb.RKeyMin, defaultZoneConfig},
			{keys.MakeTablePrefix(0), defaultZoneConfig},
			{keys.MakeTablePrefix(1), defaultZoneConfig},
			{keys.MakeTablePrefix(keys.MaxReservedDescID), defaultZoneConfig},
			{keys.MakeTablePrefix(db1), defaultZoneConfig},
			{keys.MakeTablePrefix(db2), defaultZoneConfig},
			{keys.MakeTablePrefix(tb11), defaultZoneConfig},
			{keys.MakeTablePrefix(tb12), defaultZoneConfig},
			{keys.MakeTablePrefix(tb21), defaultZoneConfig},
			{keys.MakeTablePrefix(tb22), defaultZoneConfig},
		}

		for tcNum, tc := range testCases {
			zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
			if err != nil {
				t.Fatalf("#%d: err=%s", tcNum, err)
			}
			if !proto.Equal(&zoneCfg, &tc.zoneCfg) {
				t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
			}
		}
	}

	// Now set some zone configs. We don't have a nice way of using table
	// names for this, so we do raw puts.
	// Here is the list of dbs/tables and whether they have a custom zone config:
	// db1: true
	//   tb1: true
	//   tb2: false
	// db2: false
	//   tb1: true
	//   tb2: false
	db1Cfg := config.ZoneConfig{
		NumReplicas: 1,
		Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db1"}}},
	}
	tb11Cfg := config.ZoneConfig{
		NumReplicas: 1,
		Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db1.tb1"}}},
	}
	tb21Cfg := config.ZoneConfig{
		NumReplicas: 1,
		Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db2.tb1"}}},
	}
	for objID, objZone := range map[uint32]config.ZoneConfig{
		db1:  db1Cfg,
		tb11: tb11Cfg,
		tb21: tb21Cfg,
	} {
		buf, err := protoutil.Marshal(&objZone)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = sqlDB.Exec(`INSERT INTO system.zones VALUES ($1, $2)`, objID, buf); err != nil {
			t.Fatalf("problem writing zone %+v: %s", objZone, err)
		}
	}

	{
		cfg := forceNewConfig(t, s)

		testCases := []struct {
			key     roachpb.RKey
			zoneCfg config.ZoneConfig
		}{
			{roachpb.RKeyMin, defaultZoneConfig},
			{keys.MakeTablePrefix(0), defaultZoneConfig},
			{keys.MakeTablePrefix(1), defaultZoneConfig},
			{keys.MakeTablePrefix(keys.MaxReservedDescID), defaultZoneConfig},
			{keys.MakeTablePrefix(db1), db1Cfg},
			{keys.MakeTablePrefix(db2), defaultZoneConfig},
			{keys.MakeTablePrefix(tb11), tb11Cfg},
			{keys.MakeTablePrefix(tb12), db1Cfg},
			{keys.MakeTablePrefix(tb21), tb21Cfg},
			{keys.MakeTablePrefix(tb22), defaultZoneConfig},
		}

		for tcNum, tc := range testCases {
			zoneCfg, err := cfg.GetZoneConfigForKey(tc.key)
			if err != nil {
				t.Fatalf("#%d: err=%s", tcNum, err)
			}
			if !proto.Equal(&zoneCfg, &tc.zoneCfg) {
				t.Errorf("#%d: bad zone config.\nexpected: %+v\ngot: %+v", tcNum, tc.zoneCfg, zoneCfg)
			}
		}
	}
}
)

// DefaultTCP is the default SQL/RPC port specification.
const DefaultTCP nat.Port = base.DefaultPort + "/tcp"

const defaultHTTP nat.Port = base.DefaultHTTPPort + "/tcp"

// CockroachBinaryInContainer is the container-side path to the CockroachDB
// binary.
const CockroachBinaryInContainer = "/cockroach/cockroach"

var cockroachImage = flag.String("i", builderImageFull, "the docker image to run")
var cockroachBinary = flag.String("b", defaultBinary(), "the host-side binary to run (if image == "+builderImage+")")
var cockroachEntry = flag.String("e", "", "the entry point for the image")
var waitOnStop = flag.Bool("w", false, "wait for the user to interrupt before tearing down the cluster")
var pwd = filepath.Clean(os.ExpandEnv("${PWD}"))
var maxRangeBytes = config.DefaultZoneConfig().RangeMaxBytes

// keyLen is the length (in bits) of the generated CA and node certs.
const keyLen = 1024

func defaultBinary() string {
	gopath := filepath.SplitList(os.Getenv("GOPATH"))
	if len(gopath) == 0 {
		return ""
	}
	return gopath[0] + "/bin/docker_amd64/cockroach"
}

func exists(path string) bool {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return false