// PGUrl returns a postgres connection url which connects to this server with // the given user. Returns a connection string and a cleanup function which must // be called after any connection created using the string has been closed. // // In order to connect securely using postgres, this method will create // temporary on-disk copies of certain embedded security certificates. The // certificates will be created as temporary files in the provided directory, // and their filenames will have the provided prefix. The returned cleanup // function will delete these temporary files. func PGUrl(t util.Tester, ts *server.TestServer, user, tempDir, prefix string) (url.URL, func()) { host, port, err := net.SplitHostPort(ts.PGAddr()) if err != nil { t.Fatal(err) } caPath := filepath.Join(security.EmbeddedCertsDir, "ca.crt") certPath := security.ClientCertPath(security.EmbeddedCertsDir, user) keyPath := security.ClientKeyPath(security.EmbeddedCertsDir, user) // Copy these assets to disk from embedded strings, so this test can // run from a standalone binary. tempCAPath, tempCACleanup := securitytest.TempRestrictedCopy(t, caPath, tempDir, "TestLogic_ca") tempCertPath, tempCertCleanup := securitytest.TempRestrictedCopy(t, certPath, tempDir, "TestLogic_cert") tempKeyPath, tempKeyCleanup := securitytest.TempRestrictedCopy(t, keyPath, tempDir, "TestLogic_key") return url.URL{ Scheme: "postgres", User: url.User(user), Host: net.JoinHostPort(host, port), RawQuery: fmt.Sprintf("sslmode=verify-full&sslrootcert=%s&sslcert=%s&sslkey=%s", url.QueryEscape(tempCAPath), url.QueryEscape(tempCertPath), url.QueryEscape(tempKeyPath), ), }, func() { tempCACleanup() tempCertCleanup() tempKeyCleanup() } }
func getLatestConfig(s *server.TestServer, expected int) (cfg *config.SystemConfig, err error) { err = util.IsTrueWithin(func() bool { cfg = s.Gossip().GetSystemConfig() return cfg != nil && len(cfg.Values) == expected }, 500*time.Millisecond) return }
func makeTestDBClient(t *testing.T, s *server.TestServer) *sql.DB { db, err := sql.Open("cockroach", fmt.Sprintf("https://%s@%s?certs=test_certs", security.RootUser, s.ServingAddr())) if err != nil { t.Fatal(err) } return db }
func acquire(s server.TestServer, descID sqlbase.ID, version sqlbase.DescriptorVersion) (*csql.LeaseState, error) { var lease *csql.LeaseState err := s.DB().Txn(func(txn *client.Txn) error { var err error lease, err = s.LeaseManager().(*csql.LeaseManager).Acquire(txn, descID, version) return err }) return lease, err }
func getLatestConfig(s *server.TestServer, expected int) (cfg *config.SystemConfig, err error) { err = util.IsTrueWithin(func() bool { var err2 error cfg, err2 = s.Gossip().GetSystemConfig() if err2 != nil { return false } return len(cfg.Values) != expected }, 500*time.Millisecond) return }
// setupMultipleRanges creates a test server and splits the // key range at the given keys. Returns the test server and client. // The caller is responsible for stopping the server and // closing the client. func setupMultipleRanges(t *testing.T, ts *server.TestServer, splitAt ...string) *client.DB { db := createTestClient(t, ts.Stopper(), ts.ServingAddr()) // Split the keyspace at the given keys. for _, key := range splitAt { if err := db.AdminSplit(key); err != nil { // Don't leak server goroutines. t.Fatal(err) } } return db }
func waitForConfigChange(t *testing.T, s *server.TestServer) (*config.SystemConfig, error) { var foundDesc sql.DatabaseDescriptor var cfg *config.SystemConfig return cfg, util.IsTrueWithin(func() bool { if cfg = s.Gossip().GetSystemConfig(); cfg != nil { if val := cfg.GetValue(configDescKey); val != nil { if err := val.GetProto(&foundDesc); err != nil { t.Fatal(err) } return foundDesc.ID == configID } } return false }, 10*time.Second) }
// newCLITest starts an in-process test server and stages on-disk copies of
// the embedded security assets for CLI commands to use. The returned
// cliTest's cleanupFunc removes the temporary certs directory.
func newCLITest() cliTest {
	// Reset the client context for each test. We don't reset the
	// pointer (because they are tied into the flags), but instead
	// overwrite the existing struct's values.
	baseCtx.InitDefaults()

	// NOTE(review): stderr output is deliberately redirected to stdout,
	// presumably so CLI error output is captured alongside normal output
	// in tests — confirm.
	osStderr = os.Stdout

	var s server.TestServer
	if err := s.Start(); err != nil {
		log.Fatalf("Could not start server: %v", err)
	}

	tempDir, err := ioutil.TempDir("", "cli-test")
	if err != nil {
		log.Fatal(err)
	}

	// Copy these assets to disk from embedded strings, so this test can
	// run from a standalone binary.
	// Disable embedded certs, or the security library will try to load
	// our real files as embedded assets.
	security.ResetReadFileFn()
	assets := []string{
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCACert),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCAKey),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedNodeCert),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedNodeKey),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedRootCert),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedRootKey),
	}
	// nil Tester: RestrictedCopy is expected to handle errors itself here.
	for _, a := range assets {
		securitytest.RestrictedCopy(nil, a, tempDir, filepath.Base(a))
	}

	return cliTest{
		TestServer: s,
		certsDir:   tempDir,
		cleanupFunc: func() {
			if err := os.RemoveAll(tempDir); err != nil {
				log.Fatal(err)
			}
		},
	}
}
// forceNewConfig forces a system config update by writing a bogus descriptor with an // incremented value inside. It then repeatedly fetches the gossip config until the // just-written descriptor is found. func forceNewConfig(t *testing.T, s *server.TestServer) (*config.SystemConfig, error) { configID++ configDesc := sql.DatabaseDescriptor{ Name: "sentinel", ID: configID, Privileges: &sql.PrivilegeDescriptor{}, } // This needs to be done in a transaction with the system trigger set. if err := s.DB().Txn(func(txn *client.Txn) error { txn.SetSystemDBTrigger() return txn.Put(configDescKey, &configDesc) }); err != nil { t.Fatal(err) } return waitForConfigChange(t, s) }
func initReverseScanTestEnv(s *server.TestServer, t *testing.T) *client.DB { db := createTestClient(t, s.Stopper(), s.ServingAddr()) // Set up multiple ranges: // ["", "b"),["b", "e") ,["e", "g") and ["g", "\xff\xff"). for _, key := range []string{"b", "e", "g"} { // Split the keyspace at the given key. if pErr := db.AdminSplit(key); pErr != nil { t.Fatal(pErr) } } // Write keys before, at, and after the split key. for _, key := range []string{"a", "b", "c", "d", "e", "f", "g", "h"} { if pErr := db.Put(key, "value"); pErr != nil { t.Fatal(pErr) } } return db }
func waitForConfigChange(t *testing.T, s *server.TestServer) (cfg *config.SystemConfig, err error) { var foundDesc sql.DatabaseDescriptor err = util.IsTrueWithin(func() bool { cfg = s.Gossip().GetSystemConfig() if cfg == nil { return false } raw, ok := cfg.GetValue(configDescKey) if !ok { return false } if err2 := proto.Unmarshal(raw, &foundDesc); err2 != nil { t.Fatalf("could not unmarshal raw value: %s", err2) return false } return foundDesc.ID == configID }, 10*time.Second) return }
func waitForConfigChange(t *testing.T, s *server.TestServer) config.SystemConfig { var foundDesc sqlbase.Descriptor var cfg config.SystemConfig util.SucceedsSoon(t, func() error { var ok bool if cfg, ok = s.Gossip().GetSystemConfig(); ok { if val := cfg.GetValue(configDescKey); val != nil { if err := val.GetProto(&foundDesc); err != nil { t.Fatal(err) } if id := foundDesc.GetDatabase().GetID(); id != configID { return errors.Errorf("expected database id %d; got %d", configID, id) } return nil } } return errors.Errorf("got nil system config") }) return cfg }
// PGUrl returns a postgres connection url which connects to this server with the given user, and a
// cleanup function which must be called after all connections created using the connection url have
// been closed.
//
// In order to connect securely using postgres, this method will create temporary on-disk copies of
// certain embedded security certificates. The certificates will be created in a new temporary
// directory. The returned cleanup function will delete this temporary directory.
func PGUrl(t testing.TB, ts *server.TestServer, user, prefix string) (url.URL, func()) {
	host, port, err := net.SplitHostPort(ts.PGAddr())
	if err != nil {
		t.Fatal(err)
	}
	// prefix only names the temporary directory; the files inside use
	// fixed names ("ca", "cert", "key").
	tempDir, err := ioutil.TempDir("", prefix)
	if err != nil {
		t.Fatal(err)
	}
	caPath := security.CACertPath(security.EmbeddedCertsDir)
	certPath := security.ClientCertPath(security.EmbeddedCertsDir, user)
	keyPath := security.ClientKeyPath(security.EmbeddedCertsDir, user)

	// Copy these assets to disk from embedded strings, so this test can
	// run from a standalone binary.
	tempCAPath := securitytest.RestrictedCopy(t, caPath, tempDir, "ca")
	tempCertPath := securitytest.RestrictedCopy(t, certPath, tempDir, "cert")
	tempKeyPath := securitytest.RestrictedCopy(t, keyPath, tempDir, "key")

	// verify-full: validate the server cert against the copied CA and
	// authenticate with the user's client cert/key.
	options := url.Values{}
	options.Add("sslmode", "verify-full")
	options.Add("sslrootcert", tempCAPath)
	options.Add("sslcert", tempCertPath)
	options.Add("sslkey", tempKeyPath)

	return url.URL{
			Scheme:   "postgres",
			User:     url.User(user),
			Host:     net.JoinHostPort(host, port),
			RawQuery: options.Encode(),
		}, func() {
			if err := os.RemoveAll(tempDir); err != nil {
				// Not Fatal() because we might already be panicking.
				t.Error(err)
			}
		}
}
// checkPGWireMetrics returns the server's pgwire bytesIn/bytesOut and an error if the // bytesIn/bytesOut don't satisfy the given minimums and maximums. func checkPGWireMetrics(s *server.TestServer, minBytesIn, minBytesOut, maxBytesIn, maxBytesOut int64) (int64, int64, error) { nid := s.Gossip().GetNodeID().String() if err := s.WriteSummaries(); err != nil { return -1, -1, err } bytesIn := s.MustGetCounter("cr.node.pgwire.bytesin." + nid) bytesOut := s.MustGetCounter("cr.node.pgwire.bytesout." + nid) if a, min := bytesIn, minBytesIn; a < min { return bytesIn, bytesOut, util.Errorf("bytesin %d < expected min %d", a, min) } if a, min := bytesOut, minBytesOut; a < min { return bytesIn, bytesOut, util.Errorf("bytesout %d < expected min %d", a, min) } if a, max := bytesIn, maxBytesIn; a > max { return bytesIn, bytesOut, util.Errorf("bytesin %d > expected max %d", a, max) } if a, max := bytesOut, maxBytesOut; a > max { return bytesIn, bytesOut, util.Errorf("bytesout %d > expected max %d", a, max) } return bytesIn, bytesOut, nil }
func TestDB_Put_insecure(t *testing.T) { defer leaktest.AfterTest(t)() ctx := server.MakeTestContext() ctx.Insecure = true s := server.TestServer{ Ctx: &ctx, } if err := s.Start(); err != nil { log.Fatalf("Could not start server: %v", err) } defer s.Stop() db := s.DB() if err := db.Put("aa", "1"); err != nil { panic(err) } result, err := db.Get("aa") if err != nil { panic(err) } checkResult(t, []byte("1"), result.ValueBytes()) }
// ExampleDB_Put_insecure demonstrates a basic Put/Get round-trip against a
// server started in insecure mode. panic/log.Fatalf are used (rather than a
// testing.T) because Example functions have no test handle.
func ExampleDB_Put_insecure() {
	ctx := server.MakeTestContext()
	ctx.Insecure = true
	s := server.TestServer{
		Ctx: &ctx,
	}
	if err := s.Start(); err != nil {
		log.Fatalf("Could not start server: %v", err)
	}
	defer s.Stop()
	db := s.DB()

	if err := db.Put("aa", "1"); err != nil {
		panic(err)
	}
	result, err := db.Get("aa")
	if err != nil {
		panic(err)
	}
	fmt.Printf("aa=%s\n", result.ValueBytes())

	// Output:
	// aa=1
}
// checkSQLNetworkMetrics returns the server's pgwire bytesIn/bytesOut and an // error if the bytesIn/bytesOut don't satisfy the given minimums and maximums. func checkSQLNetworkMetrics(s server.TestServer, minBytesIn, minBytesOut, maxBytesIn, maxBytesOut int64) (int64, int64, error) { if err := s.WriteSummaries(); err != nil { return -1, -1, err } bytesIn := s.MustGetSQLNetworkCounter("bytesin") bytesOut := s.MustGetSQLNetworkCounter("bytesout") if a, min := bytesIn, minBytesIn; a < min { return bytesIn, bytesOut, util.Errorf("bytesin %d < expected min %d", a, min) } if a, min := bytesOut, minBytesOut; a < min { return bytesIn, bytesOut, util.Errorf("bytesout %d < expected min %d", a, min) } if a, max := bytesIn, maxBytesIn; a > max { return bytesIn, bytesOut, util.Errorf("bytesin %d > expected max %d", a, max) } if a, max := bytesOut, maxBytesOut; a > max { return bytesIn, bytesOut, util.Errorf("bytesout %d > expected max %d", a, max) } return bytesIn, bytesOut, nil }
// This is a fairly high-level test of CA and node certificates.
// We construct SSL server and clients and use the generated certs.
func TestUseCerts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Do not mock cert access for this test.
	security.ResetReadFileFn()
	defer ResetTest()
	certsDir := util.CreateTempDir(t, "certs_test")
	defer util.CleanupDir(certsDir)

	// Generate a CA cert/key pair (512-bit keys keep the test fast).
	err := security.RunCreateCACert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		512)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Generate a node cert/key pair signed by the CA, valid for 127.0.0.1.
	err = security.RunCreateNodeCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey),
		512, []string{"127.0.0.1"})
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Generate a client cert/key pair for the root user.
	err = security.RunCreateClientCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedRootCert),
		filepath.Join(certsDir, security.EmbeddedRootKey),
		512, security.RootUser)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Load TLS Configs. This is what TestServer and HTTPClient do internally.
	_, err = security.LoadServerTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	// NOTE(review): the client TLS config is loaded with the *node*
	// cert/key, presumably exercising node-to-node auth — confirm.
	_, err = security.LoadClientTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Start a test server and override certs.
	// We use a real context since we want generated certs.
	ctx := server.MakeContext()
	ctx.Insecure = false
	ctx.SSLCA = filepath.Join(certsDir, security.EmbeddedCACert)
	ctx.SSLCert = filepath.Join(certsDir, security.EmbeddedNodeCert)
	ctx.SSLCertKey = filepath.Join(certsDir, security.EmbeddedNodeKey)
	ctx.User = security.NodeUser
	ctx.Addr = "127.0.0.1:0"
	ctx.HTTPAddr = "127.0.0.1:0"
	s := server.TestServer{Ctx: &ctx}
	if err := s.Start(); err != nil {
		t.Fatal(err)
	}
	defer s.Stop()

	// Insecure mode: an insecure client against the secure server must
	// fail the TLS handshake.
	clientContext := testutils.NewNodeTestBaseContext()
	clientContext.Insecure = true
	httpClient, err := clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	req, err := http.NewRequest("GET", s.Ctx.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err := httpClient.Do(req)
	if err == nil {
		resp.Body.Close()
		t.Fatalf("Expected SSL error, got success")
	}

	// Secure mode but no Certs: permissive config.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.Insecure = false
	clientContext.SSLCert = ""
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	// Endpoint that does not enforce client auth (see: server/authentication_test.go)
	req, err = http.NewRequest("GET", s.Ctx.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Expected OK, got: %d", resp.StatusCode)
	}

	// New client. With certs this time.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.SSLCA = filepath.Join(certsDir, security.EmbeddedCACert)
	clientContext.SSLCert = filepath.Join(certsDir, security.EmbeddedNodeCert)
	clientContext.SSLCertKey = filepath.Join(certsDir, security.EmbeddedNodeKey)
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	req, err = http.NewRequest("GET", s.Ctx.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Expected OK, got: %d", resp.StatusCode)
	}
}
// TestHttpQuery exercises the time series HTTP query endpoint end-to-end:
// it writes datapoints directly into the ts DB, issues a query over HTTP,
// and compares against a hand-computed expected response.
func TestHttpQuery(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tsrv server.TestServer
	if err := tsrv.Start(); err != nil {
		t.Fatal(err)
	}
	defer tsrv.Stop()

	// Populate data directly.
	tsdb := tsrv.TsDB()
	if err := tsdb.StoreData(ts.Resolution10s, []ts.TimeSeriesData{
		{
			Name:   "test.metric",
			Source: "source1",
			Datapoints: []ts.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 520 * 1e9,
					Value:          300.0,
				},
			},
		},
		{
			Name:   "test.metric",
			Source: "source2",
			Datapoints: []ts.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 510 * 1e9,
					Value:          250.0,
				},
				{
					TimestampNanos: 530 * 1e9,
					Value:          350.0,
				},
			},
		},
		{
			// No Source: queried back under the empty-string source.
			Name: "other.metric",
			Datapoints: []ts.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 510 * 1e9,
					Value:          250.0,
				},
			},
		},
	}); err != nil {
		t.Fatal(err)
	}

	// Expected response for the three queries below; the third query uses
	// MAX downsampling/aggregation with a derivative.
	expectedResult := ts.TimeSeriesQueryResponse{
		Results: []ts.TimeSeriesQueryResponse_Result{
			{
				Query: ts.Query{
					Name:             "test.metric",
					Sources:          []string{"source1", "source2"},
					Downsampler:      ts.TimeSeriesQueryAggregator_AVG.Enum(),
					SourceAggregator: ts.TimeSeriesQueryAggregator_SUM.Enum(),
					Derivative:       ts.TimeSeriesQueryDerivative_NONE.Enum(),
				},
				Datapoints: []ts.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          400.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          500.0,
					},
					{
						TimestampNanos: 525 * 1e9,
						Value:          600.0,
					},
				},
			},
			{
				Query: ts.Query{
					Name:             "other.metric",
					Sources:          []string{""},
					Downsampler:      ts.TimeSeriesQueryAggregator_AVG.Enum(),
					SourceAggregator: ts.TimeSeriesQueryAggregator_SUM.Enum(),
					Derivative:       ts.TimeSeriesQueryDerivative_NONE.Enum(),
				},
				Datapoints: []ts.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          200.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          250.0,
					},
				},
			},
			{
				Query: ts.Query{
					Name:             "test.metric",
					Sources:          []string{"source1", "source2"},
					Downsampler:      ts.TimeSeriesQueryAggregator_MAX.Enum(),
					SourceAggregator: ts.TimeSeriesQueryAggregator_MAX.Enum(),
					Derivative:       ts.TimeSeriesQueryDerivative_DERIVATIVE.Enum(),
				},
				Datapoints: []ts.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          1.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          5.0,
					},
					{
						TimestampNanos: 525 * 1e9,
						Value:          5.0,
					},
				},
			},
		},
	}

	var response ts.TimeSeriesQueryResponse
	session := makeTestHTTPSession(t, &tsrv.Ctx.Context, tsrv.HTTPAddr())
	if err := session.PostProto(ts.URLQuery, &ts.TimeSeriesQueryRequest{
		StartNanos: 500 * 1e9,
		EndNanos:   526 * 1e9,
		Queries: []ts.Query{
			{
				Name: "test.metric",
			},
			{
				Name: "other.metric",
			},
			{
				Name:             "test.metric",
				Downsampler:      ts.TimeSeriesQueryAggregator_MAX.Enum(),
				SourceAggregator: ts.TimeSeriesQueryAggregator_MAX.Enum(),
				Derivative:       ts.TimeSeriesQueryDerivative_DERIVATIVE.Enum(),
			},
		},
	}, &response); err != nil {
		t.Fatal(err)
	}
	// Source ordering in the response is not deterministic; sort for a
	// stable comparison.
	for _, r := range response.Results {
		sort.Strings(r.Sources)
	}
	if !reflect.DeepEqual(response, expectedResult) {
		t.Fatalf("actual response \n%v\n did not match expected response \n%v", response, expectedResult)
	}
}
// TestQuery exercises the time series gRPC Query endpoint end-to-end:
// it writes datapoints directly into the ts DB, issues a query over gRPC,
// and compares against a hand-computed expected response.
func TestQuery(t *testing.T) {
	defer leaktest.AfterTest(t)()
	var tsrv server.TestServer
	if err := tsrv.Start(); err != nil {
		t.Fatal(err)
	}
	defer tsrv.Stop()

	// Populate data directly.
	tsdb := tsrv.TsDB()
	if err := tsdb.StoreData(ts.Resolution10s, []tspb.TimeSeriesData{
		{
			Name:   "test.metric",
			Source: "source1",
			Datapoints: []tspb.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 520 * 1e9,
					Value:          300.0,
				},
			},
		},
		{
			Name:   "test.metric",
			Source: "source2",
			Datapoints: []tspb.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 510 * 1e9,
					Value:          250.0,
				},
				{
					TimestampNanos: 530 * 1e9,
					Value:          350.0,
				},
			},
		},
		{
			// No Source: queried back under the empty-string source.
			Name: "other.metric",
			Datapoints: []tspb.TimeSeriesDatapoint{
				{
					TimestampNanos: 400 * 1e9,
					Value:          100.0,
				},
				{
					TimestampNanos: 500 * 1e9,
					Value:          200.0,
				},
				{
					TimestampNanos: 510 * 1e9,
					Value:          250.0,
				},
			},
		},
	}); err != nil {
		t.Fatal(err)
	}

	// Expected response for the three queries below; the third query uses
	// MAX downsampling/aggregation with a derivative.
	expectedResult := &tspb.TimeSeriesQueryResponse{
		Results: []tspb.TimeSeriesQueryResponse_Result{
			{
				Query: tspb.Query{
					Name:             "test.metric",
					Sources:          []string{"source1", "source2"},
					Downsampler:      tspb.TimeSeriesQueryAggregator_AVG.Enum(),
					SourceAggregator: tspb.TimeSeriesQueryAggregator_SUM.Enum(),
					Derivative:       tspb.TimeSeriesQueryDerivative_NONE.Enum(),
				},
				Datapoints: []tspb.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          400.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          500.0,
					},
					{
						TimestampNanos: 525 * 1e9,
						Value:          600.0,
					},
				},
			},
			{
				Query: tspb.Query{
					Name:             "other.metric",
					Sources:          []string{""},
					Downsampler:      tspb.TimeSeriesQueryAggregator_AVG.Enum(),
					SourceAggregator: tspb.TimeSeriesQueryAggregator_SUM.Enum(),
					Derivative:       tspb.TimeSeriesQueryDerivative_NONE.Enum(),
				},
				Datapoints: []tspb.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          200.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          250.0,
					},
				},
			},
			{
				Query: tspb.Query{
					Name:             "test.metric",
					Sources:          []string{"source1", "source2"},
					Downsampler:      tspb.TimeSeriesQueryAggregator_MAX.Enum(),
					SourceAggregator: tspb.TimeSeriesQueryAggregator_MAX.Enum(),
					Derivative:       tspb.TimeSeriesQueryDerivative_DERIVATIVE.Enum(),
				},
				Datapoints: []tspb.TimeSeriesDatapoint{
					{
						TimestampNanos: 505 * 1e9,
						Value:          1.0,
					},
					{
						TimestampNanos: 515 * 1e9,
						Value:          5.0,
					},
					{
						TimestampNanos: 525 * 1e9,
						Value:          5.0,
					},
				},
			},
		},
	}

	conn, err := tsrv.RPCContext().GRPCDial(tsrv.Ctx.Addr)
	if err != nil {
		t.Fatal(err)
	}
	response, err := tspb.NewTimeSeriesClient(conn).Query(context.Background(), &tspb.TimeSeriesQueryRequest{
		StartNanos: 500 * 1e9,
		EndNanos:   526 * 1e9,
		Queries: []tspb.Query{
			{
				Name: "test.metric",
			},
			{
				Name: "other.metric",
			},
			{
				Name:             "test.metric",
				Downsampler:      tspb.TimeSeriesQueryAggregator_MAX.Enum(),
				SourceAggregator: tspb.TimeSeriesQueryAggregator_MAX.Enum(),
				Derivative:       tspb.TimeSeriesQueryDerivative_DERIVATIVE.Enum(),
			},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	// Source ordering in the response is not deterministic; sort for a
	// stable comparison.
	for _, r := range response.Results {
		sort.Strings(r.Sources)
	}
	if !proto.Equal(response, expectedResult) {
		t.Fatalf("actual response \n%v\n did not match expected response \n%v", response, expectedResult)
	}
}
// cleanupTestServer stops the test server, then clears the global testing
// command filter so subsequent tests start with a clean slate. The filter is
// cleared only after Stop so in-flight commands remain filtered during
// shutdown.
func cleanupTestServer(s *server.TestServer) {
	s.Stop()
	storage.TestingCommandFilter = nil
}
// cleanup closes the SQL client and stops the test server. The Close error
// is deliberately discarded: this is best-effort test teardown.
func cleanup(s *server.TestServer, db *sql.DB) {
	_ = db.Close()
	s.Stop()
}
// TestRequestToUninitializedRange tests the behavior when a request
// is sent to a node which should be a replica of the correct range
// but has not yet received its initial snapshot. This would
// previously panic due to a malformed error response from the server,
// as seen in https://github.com/cockroachdb/cockroach/issues/6027.
//
// Prior to the other changes in the commit that introduced it, this
// test would reliable trigger the panic from #6027. However, it
// relies on some hacky tricks to both trigger the panic and shut down
// cleanly. If this test needs a lot of maintenance in the future we
// should be willing to get rid of it.
func TestRequestToUninitializedRange(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.TestServer{StoresPerNode: 2}
	if err := s.Start(); err != nil {
		t.Fatalf("Could not start server: %v", err)
	}
	defer s.Stop()

	// Choose a range ID that is much larger than any that would be
	// created by initial splits.
	const rangeID = roachpb.RangeID(1000)

	// Set up a range with replicas on two stores of the same node. This
	// ensures that the DistSender will consider both replicas healthy
	// and will try to talk to both (so we can get a non-retryable error
	// from the second store).
	replica1 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   1,
		ReplicaID: 1,
	}
	replica2 := roachpb.ReplicaDescriptor{
		NodeID:    1,
		StoreID:   2,
		ReplicaID: 2,
	}

	// HACK: remove the second store from the node to generate a
	// non-retryable error when we try to talk to it.
	store2, err := s.Stores().GetStore(2)
	if err != nil {
		t.Fatal(err)
	}
	s.Stores().RemoveStore(store2)

	// Create the uninitialized range by sending an isolated raft
	// message to the first store.
	conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
	if err != nil {
		t.Fatal(err)
	}
	raftClient := storage.NewMultiRaftClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := raftClient.RaftMessage(ctx)
	if err != nil {
		t.Fatal(err)
	}
	msg := storage.RaftMessageRequest{
		GroupID:     rangeID,
		ToReplica:   replica1,
		FromReplica: replica2,
		Message: raftpb.Message{
			Type: raftpb.MsgApp,
			To:   1,
		},
	}
	if err := stream.Send(&msg); err != nil {
		t.Fatal(err)
	}

	// Make sure the replica was created (but remains uninitialized).
	store1, err := s.Stores().GetStore(1)
	if err != nil {
		t.Fatal(err)
	}
	util.SucceedsSoon(t, func() error {
		if replica, err := store1.GetReplica(rangeID); err != nil {
			return util.Errorf("failed to look up replica: %s", err)
		} else if replica.IsInitialized() {
			return util.Errorf("expected replica to be uninitialized")
		}
		return nil
	})

	// Create our own DistSender so we can force some requests to the
	// bogus range. The DistSender needs to be in scope for its own
	// MockRangeDescriptorDB closure.
	var sender *kv.DistSender
	sender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:      s.Clock(),
		RPCContext: s.RPCContext(),
		RangeDescriptorDB: kv.MockRangeDescriptorDB(
			func(key roachpb.RKey, considerIntents, useReverseScan bool,
			) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, *roachpb.Error) {
				if key.Equal(roachpb.RKeyMin) {
					// Pass through requests for the first range to the real sender.
					desc, err := sender.FirstRange()
					if err != nil {
						return nil, nil, roachpb.NewError(err)
					}
					return []roachpb.RangeDescriptor{*desc}, nil, nil
				}
				// All other keys resolve to the bogus uninitialized range.
				return []roachpb.RangeDescriptor{{
					RangeID:  rangeID,
					StartKey: roachpb.RKey(keys.Meta2Prefix),
					EndKey:   roachpb.RKeyMax,
					Replicas: []roachpb.ReplicaDescriptor{replica1, replica2},
				}}, nil, nil
			}),
	}, s.Gossip())

	// Only inconsistent reads triggered the panic in #6027.
	hdr := roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}
	req := roachpb.NewGet(roachpb.Key("asdf"))

	// Repeat the test a few times: due to the randomization between the
	// two replicas, each attempt only had a 50% chance of triggering
	// the panic.
	for i := 0; i < 5; i++ {
		_, pErr := client.SendWrappedWith(sender, context.Background(), hdr, req)
		// Each attempt fails with "store 2 not found" because that is the
		// non-retryable error.
		if !testutils.IsPError(pErr, "store 2 not found") {
			t.Fatal(pErr)
		}
	}
}