func createTestDBWithContext(
	t testing.TB, dbCtx client.DBContext,
) (*localtestcluster.LocalTestCluster, *TxnCoordSender) {
	s := &localtestcluster.LocalTestCluster{
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	return s, s.Sender.(*TxnCoordSender)
}
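// The sketch below is hypothetical and not from the source: it shows how a
// test might call createTestDBWithContext with a tweaked client.DBContext.
// The test name and the MaxRetries value are assumptions for illustration.
func TestCreateTestDBWithContextSketch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions.MaxRetries = 1 // assumption: any non-default retry options would do
	s, sender := createTestDBWithContext(t, dbCtx)
	defer s.Stop()
	_ = sender // the returned TxnCoordSender is available for assertions
}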
// TestUncertaintyRestart verifies that a transaction which finds a write in
// its near future will restart exactly once, meaning that it's made a note of
// that node's clock for its new timestamp.
func TestUncertaintyRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const maxOffset = 250 * time.Millisecond
	dbCtx := client.DefaultDBContext()
	s := &localtestcluster.LocalTestCluster{
		Clock:     hlc.NewClock(hlc.UnixNano, maxOffset),
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	if err := disableOwnNodeCertain(s); err != nil {
		t.Fatal(err)
	}
	s.Manual.Increment(s.Clock.MaxOffset().Nanoseconds() + 1)

	var key = roachpb.Key("a")
	errChan := make(chan error)
	start := make(chan struct{})

	go func() {
		<-start
		errChan <- s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
			return txn.Put(key, "hi")
		})
	}()

	if err := s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
		if txn.Proto.Epoch > 2 {
			t.Fatal("expected only one restart")
		}
		// Issue a read to pick a timestamp.
		if _, err := txn.Get(key.Next()); err != nil {
			t.Fatal(err)
		}
		if txn.Proto.Epoch == 0 {
			close(start) // let someone write into our future
			// when they're done, try to read
			if err := <-errChan; err != nil {
				t.Fatal(err)
			}
		}
		if _, err := txn.Get(key.Next()); err != nil {
			if _, ok := err.(*roachpb.ReadWithinUncertaintyIntervalError); !ok {
				t.Fatalf("unexpected error: %T: %s", err, err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// checkConcurrency creates a history verifier, starts a new database
// and runs the verifier.
func checkConcurrency(
	name string, isolations []enginepb.IsolationType, txns []string, verify *verifier, t *testing.T,
) {
	verifier := newHistoryVerifier(name, txns, verify, t)
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = correctnessTestRetryOptions
	s := &localtestcluster.LocalTestCluster{
		DBContext:         &dbCtx,
		RangeRetryOptions: &correctnessTestRetryOptions,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	verifier.run(isolations, s.DB, t)
}
func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext {
	rttc := &raftTransportTestContext{
		t:          t,
		stopper:    stop.NewStopper(),
		transports: map[roachpb.NodeID]*storage.RaftTransport{},
	}
	rttc.nodeRPCContext = rpc.NewContext(
		log.AmbientContext{}, testutils.NewNodeTestBaseContext(), nil, rttc.stopper,
	)
	server := rpc.NewServer(rttc.nodeRPCContext) // never started
	rttc.gossip = gossip.NewTest(
		1, rttc.nodeRPCContext, server, nil, rttc.stopper, metric.NewRegistry(),
	)
	return rttc
}
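// Hypothetical caller (not from the source): build the test context and make
// sure its stopper is released when the test finishes. The test name is made up.
func TestRaftTransportContextSketch(t *testing.T) {
	rttc := newRaftTransportTestContext(t)
	defer rttc.stopper.Stop()
	// rttc.nodeRPCContext and rttc.gossip are now ready for wiring up transports.
}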
// benchmarkSingleRoundtripWithLatency runs a number of transactions writing to
// the same key back to back in a single round-trip. Latency is simulated
// by pausing before each RPC is sent.
func benchmarkSingleRoundtripWithLatency(b *testing.B, latency time.Duration) {
	s := &localtestcluster.LocalTestCluster{}
	s.Latency = latency
	s.Start(b, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	defer b.StopTimer()
	key := roachpb.Key("key")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if tErr := s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
			b := txn.NewBatch()
			b.Put(key, fmt.Sprintf("value-%d", i))
			return txn.CommitInBatch(b)
		}); tErr != nil {
			b.Fatal(tErr)
		}
	}
}
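// Illustrative Benchmark wrappers (names and latency values are assumptions,
// not from the source); `go test -bench .` discovers these and forwards to
// benchmarkSingleRoundtripWithLatency above.
func BenchmarkSingleRoundtripTxnNoLatency(b *testing.B) {
	benchmarkSingleRoundtripWithLatency(b, 0)
}

func BenchmarkSingleRoundtripTxn10ms(b *testing.B) {
	benchmarkSingleRoundtripWithLatency(b, 10*time.Millisecond)
}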
"github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/httputil" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/gogo/protobuf/jsonpb" ) var nodeTestBaseContext = testutils.NewNodeTestBaseContext() // TestSelfBootstrap verifies operation when no bootstrap hosts have // been specified. func TestSelfBootstrap(t *testing.T) { defer leaktest.AfterTest(t)() s, err := serverutils.StartServerRaw(base.TestServerArgs{}) if err != nil { t.Fatal(err) } defer s.Stopper().Stop() } // TestServerStartClock tests that a server's clock is not pushed out of thin // air. This used to happen - the simple act of starting was causing a server's // clock to be pushed because we were introducing bogus future timestamps into
// Verify client certificate enforcement and user whitelisting.
func TestSSLEnforcement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// HTTPS with client certs for security.RootUser.
	rootCertsContext := testutils.NewTestBaseContext(security.RootUser)
	// HTTPS with client certs for security.NodeUser.
	nodeCertsContext := testutils.NewNodeTestBaseContext()
	// HTTPS with client certs for TestUser.
	testCertsContext := testutils.NewTestBaseContext(TestUser)
	// HTTPS without client certs. The user does not matter.
	noCertsContext := insecureCtx{}
	// Plain http.
	insecureContext := testutils.NewTestBaseContext(TestUser)
	insecureContext.Insecure = true

	kvGet := &roachpb.GetRequest{}
	kvGet.Key = roachpb.Key("/")

	testCases := []struct {
		method, path string
		body         proto.Message
		ctx          ctxI
		success      bool // request sent successfully (may be non-200)
		code         int  // http response code
	}{
		// /ui/: basic file server: no auth.
		{"GET", "/index.html", nil, rootCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, testCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, noCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, insecureContext, true, http.StatusPermanentRedirect},

		// /_admin/: server.adminServer: no auth.
		{"GET", adminPrefix + "health", nil, rootCertsContext, true, http.StatusOK},
		{"GET", adminPrefix + "health", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", adminPrefix + "health", nil, testCertsContext, true, http.StatusOK},
		{"GET", adminPrefix + "health", nil, noCertsContext, true, http.StatusOK},
		{"GET", adminPrefix + "health", nil, insecureContext, true, http.StatusPermanentRedirect},

		// /debug/: server.adminServer: no auth.
		{"GET", debugEndpoint + "vars", nil, rootCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, testCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, noCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, insecureContext, true, http.StatusPermanentRedirect},

		// /_status/nodes: server.statusServer: no auth.
		{"GET", statusPrefix + "nodes", nil, rootCertsContext, true, http.StatusOK},
		{"GET", statusPrefix + "nodes", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", statusPrefix + "nodes", nil, testCertsContext, true, http.StatusOK},
		{"GET", statusPrefix + "nodes", nil, noCertsContext, true, http.StatusOK},
		{"GET", statusPrefix + "nodes", nil, insecureContext, true, http.StatusPermanentRedirect},

		// /ts/: ts.Server: no auth.
{"GET", ts.URLPrefix, nil, rootCertsContext, true, http.StatusNotFound}, {"GET", ts.URLPrefix, nil, nodeCertsContext, true, http.StatusNotFound}, {"GET", ts.URLPrefix, nil, testCertsContext, true, http.StatusNotFound}, {"GET", ts.URLPrefix, nil, noCertsContext, true, http.StatusNotFound}, {"GET", ts.URLPrefix, nil, insecureContext, true, http.StatusPermanentRedirect}, } for tcNum, tc := range testCases { client, err := tc.ctx.GetHTTPClient() if err != nil { t.Fatalf("[%d]: failed to get http client: %v", tcNum, err) } url := fmt.Sprintf( "%s://%s%s", tc.ctx.HTTPRequestScheme(), s.(*TestServer).Cfg.HTTPAddr, tc.path) resp, err := doHTTPReq(t, client, tc.method, url, tc.body) if (err == nil) != tc.success { t.Errorf("[%d]: expected success=%t, got err=%v", tcNum, tc.success, err) } if err != nil { continue } defer resp.Body.Close() if resp.StatusCode != tc.code { t.Errorf("[%d]: expected status code %d, got %d", tcNum, tc.code, resp.StatusCode) } } }
// This is a fairly high-level test of CA and node certificates.
// We construct an SSL server and clients and use the generated certs.
func TestUseCerts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Do not mock cert access for this test.
	security.ResetReadFileFn()
	defer ResetTest()

	certsDir := util.CreateTempDir(t, "certs_test")
	defer util.CleanupDir(certsDir)

	err := security.RunCreateCACert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		512)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	err = security.RunCreateNodeCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey),
		512, []string{"127.0.0.1"})
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	err = security.RunCreateClientCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedRootCert),
		filepath.Join(certsDir, security.EmbeddedRootKey),
		512, security.RootUser)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Load TLS Configs. This is what TestServer and HTTPClient do internally.
	_, err = security.LoadServerTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	_, err = security.LoadClientTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Start a test server and override certs.
	// We use a real context since we want generated certs.
	params := base.TestServerArgs{
		SSLCA:      filepath.Join(certsDir, security.EmbeddedCACert),
		SSLCert:    filepath.Join(certsDir, security.EmbeddedNodeCert),
		SSLCertKey: filepath.Join(certsDir, security.EmbeddedNodeKey),
	}
	s, _, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	// Insecure mode.
	clientContext := testutils.NewNodeTestBaseContext()
	clientContext.Insecure = true
	httpClient, err := clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	req, err := http.NewRequest("GET", s.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err := httpClient.Do(req)
	if err == nil {
		resp.Body.Close()
		t.Fatalf("Expected SSL error, got success")
	}

	// New client. With certs this time.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.SSLCA = filepath.Join(certsDir, security.EmbeddedCACert)
	clientContext.SSLCert = filepath.Join(certsDir, security.EmbeddedNodeCert)
	clientContext.SSLCertKey = filepath.Join(certsDir, security.EmbeddedNodeKey)
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	req, err = http.NewRequest("GET", s.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Expected OK, got: %d", resp.StatusCode)
	}
}
// Start constructs and starts the local test server and creates a
// time series DB.
func (tm *testModel) Start() {
	tm.LocalTestCluster.Start(tm.t, testutils.NewNodeTestBaseContext(),
		kv.InitSenderForLocalTestCluster)
	tm.DB = NewDB(tm.LocalTestCluster.DB)
}
// TestUncertaintyMaxTimestampForwarding checks that when receiving an
// uncertainty restart on a node, the next attempt to read (at the increased
// timestamp) is free from uncertainty. See roachpb.Transaction for details.
func TestUncertaintyMaxTimestampForwarding(t *testing.T) {
	defer leaktest.AfterTest(t)()

	dbCtx := client.DefaultDBContext()
	s := &localtestcluster.LocalTestCluster{
		// Large offset so that any value in the future is an uncertain read. Also
		// makes sure that the values we write in the future below don't actually
		// wind up in the past.
		Clock:     hlc.NewClock(hlc.UnixNano, 50*time.Second),
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	disableOwnNodeCertain(t, s)

	offsetNS := int64(100)
	keySlow := roachpb.Key("slow")
	keyFast := roachpb.Key("fast")
	valSlow := []byte("wols")
	valFast := []byte("tsaf")

	// Write keySlow at now+offset, keyFast at now+2*offset.
	futureTS := s.Clock.Now()
	futureTS.WallTime += offsetNS
	val := roachpb.MakeValueFromBytes(valSlow)
	if err := engine.MVCCPut(context.Background(), s.Eng, nil, keySlow, futureTS,
		val, nil); err != nil {
		t.Fatal(err)
	}
	futureTS.WallTime += offsetNS
	val.SetBytes(valFast)
	if err := engine.MVCCPut(context.Background(), s.Eng, nil, keyFast, futureTS,
		val, nil); err != nil {
		t.Fatal(err)
	}

	i := 0
	if tErr := s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
		i++
		// The first command serves to start a Txn, fixing the timestamps.
		// There will be a restart, but this is idempotent.
		if _, err := txn.Scan("t", roachpb.Key("t").Next(), 0); err != nil {
			t.Fatal(err)
		}
		// This is a bit of a hack for the sake of this test: By visiting the
		// node above, we've made a note of its clock, which allows us to
		// prevent the restart. But we want to catch the restart, so reset the
		// observed timestamps.
		txn.Proto.ResetObservedTimestamps()

		// The server's clock suddenly jumps ahead of keyFast's timestamp.
		s.Manual.Increment(2*offsetNS + 1)

		// Now read keySlow first. It should read at 0, catch an uncertainty error,
		// and get keySlow's timestamp in that error, but upgrade it to the larger
		// node clock (which is ahead of keyFast as well). If the last part does
		// not happen, the read of keyFast should fail (i.e. read nothing).
		// There will be exactly one restart here.
		if gr, err := txn.Get(keySlow); err != nil {
			if i != 1 {
				t.Fatalf("unexpected transaction error: %s", err)
			}
			return err
		} else if !gr.Exists() || !bytes.Equal(gr.ValueBytes(), valSlow) {
			t.Fatalf("read of %q returned %v, wanted value %q", keySlow, gr.Value, valSlow)
		}

		// The node should already be certain, so we expect no restart here
		// and to read the correct key.
		if gr, err := txn.Get(keyFast); err != nil {
			t.Fatalf("second Get failed with %s", err)
		} else if !gr.Exists() || !bytes.Equal(gr.ValueBytes(), valFast) {
			t.Fatalf("read of %q returned %v, wanted value %q", keyFast, gr.Value, valFast)
		}
		return nil
	}); tErr != nil {
		t.Fatal(tErr)
	}
}
// newNodeTestContext returns an rpc.Context for testing.
// It is meant to be used by nodes.
func newNodeTestContext(clock *hlc.Clock, stopper *stop.Stopper) *rpc.Context {
	ctx := rpc.NewContext(log.AmbientContext{}, testutils.NewNodeTestBaseContext(), clock, stopper)
	ctx.HeartbeatInterval = 10 * time.Millisecond
	ctx.HeartbeatTimeout = 5 * time.Second
	return ctx
}
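// Hypothetical usage (not from the source): derive a test rpc.Context from a
// fresh clock and stopper. The helper name and the max offset value are
// assumptions for illustration.
func newTestContextSketch() (*rpc.Context, *stop.Stopper) {
	stopper := stop.NewStopper()
	clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
	return newNodeTestContext(clock, stopper), stopper
}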