Example no. 1
// TempRestrictedCopy creates a temporary on-disk copy of the embedded security asset
// with the provided path. The copy will be created as a temporary file in the
// provided directory; its filename will have the provided prefix. Returns the
// path of the temporary file and a cleanup function that will delete the
// temporary file.
//
// The temporary file will have restrictive file permissions (0600), making it
// appropriate for usage by libraries that require security assets to have such
// restrictive permissions.
func TempRestrictedCopy(t util.Tester, path, tempdir, prefix string) (string, func()) {
	contents, err := Asset(path)
	if err != nil {
		t.Fatal(err)
	}
	return util.CreateTempRestrictedFile(t, contents, tempdir, prefix)
}
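A usage sketch for the helper above: copy an embedded certificate into a temporary directory and defer the returned cleanup. The import paths, the test wrapper, and the use of os.TempDir() are assumptions added for illustration; the "ca.crt" asset path mirrors the PGUrl example further below, and *testing.T is assumed to satisfy util.Tester.

package securitytest_test

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/cockroachdb/cockroach/security"
	"github.com/cockroachdb/cockroach/security/securitytest"
)

func TestTempRestrictedCopyUsage(t *testing.T) {
	caAsset := filepath.Join(security.EmbeddedCertsDir, "ca.crt")
	// Copy the embedded asset to a 0600-permission temp file and make sure
	// the copy is removed when the test finishes.
	caPath, cleanup := securitytest.TempRestrictedCopy(t, caAsset, os.TempDir(), "TestCA")
	defer cleanup()
	_ = caPath // hand caPath to a library that insists on restrictive permissions
}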
Example no. 2
func checkRangeReplication(t util.Tester, c *cluster.LocalCluster, d time.Duration) {
	// Always talk to node 0.
	client, dbStopper := makeClient(t, c.ConnString(0))
	defer dbStopper.Stop()

	wantedReplicas := 3
	if len(c.Nodes) < 3 {
		wantedReplicas = len(c.Nodes)
	}

	log.Infof("waiting for first range to have %d replicas", wantedReplicas)

	util.SucceedsWithin(t, d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		foundReplicas, err := countRangeReplicas(client)
		if err != nil {
			return err
		}

		log.Infof("found %d replicas", foundReplicas)
		if foundReplicas >= wantedReplicas {
			return nil
		}
		return fmt.Errorf("expected %d replicas, only found %d", wantedReplicas, foundReplicas)
	})
}
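The heart of the function above is the util.SucceedsWithin polling pattern: the closure is retried until it returns nil or the deadline passes. A minimal, hedged sketch of that pattern in isolation; the util import path and the waitForCount helper are assumptions, only the SucceedsWithin signature is taken from the example.

package example

import (
	"fmt"
	"time"

	"github.com/cockroachdb/cockroach/util"
)

// waitForCount polls count() until it reports at least want, failing the
// test if that does not happen within 30 seconds.
func waitForCount(t util.Tester, count func() (int, error), want int) {
	util.SucceedsWithin(t, 30*time.Second, func() error {
		n, err := count()
		if err != nil {
			return err // a non-nil error causes another retry
		}
		if n < want {
			return fmt.Errorf("expected %d, only found %d", want, n)
		}
		return nil
	})
}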
Example no. 3
// PGUrl returns a postgres connection url which connects to this server with
// the given user. Returns a connection string and a cleanup function which must
// be called after any connection created using the string has been closed.
//
// In order to connect securely using postgres, this method will create
// temporary on-disk copies of certain embedded security certificates. The
// certificates will be created as temporary files in the provided directory,
// and their filenames will have the provided prefix. The returned cleanup
// function will delete these temporary files.
func PGUrl(t util.Tester, ts *server.TestServer, user, tempDir, prefix string) (url.URL, func()) {
	host, port, err := net.SplitHostPort(ts.PGAddr())
	if err != nil {
		t.Fatal(err)
	}

	caPath := filepath.Join(security.EmbeddedCertsDir, "ca.crt")
	certPath := security.ClientCertPath(security.EmbeddedCertsDir, user)
	keyPath := security.ClientKeyPath(security.EmbeddedCertsDir, user)

	// Copy these assets to disk from embedded strings, so this test can
	// run from a standalone binary.
	tempCAPath, tempCACleanup := securitytest.TempRestrictedCopy(t, caPath, tempDir, "TestLogic_ca")
	tempCertPath, tempCertCleanup := securitytest.TempRestrictedCopy(t, certPath, tempDir, "TestLogic_cert")
	tempKeyPath, tempKeyCleanup := securitytest.TempRestrictedCopy(t, keyPath, tempDir, "TestLogic_key")

	return url.URL{
			Scheme: "postgres",
			User:   url.User(user),
			Host:   net.JoinHostPort(host, port),
			RawQuery: fmt.Sprintf("sslmode=verify-full&sslrootcert=%s&sslcert=%s&sslkey=%s",
				url.QueryEscape(tempCAPath),
				url.QueryEscape(tempCertPath),
				url.QueryEscape(tempKeyPath),
			),
		}, func() {
			tempCACleanup()
			tempCertCleanup()
			tempKeyCleanup()
		}
}
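A hedged sketch of how PGUrl is typically consumed: start a test server, build the URL, open a database/sql connection, and close the connection before the deferred cleanup deletes the certificate copies. The package paths, the lib/pq driver, security.RootUser, and s.Stop() are assumptions not shown in the example itself.

package sqlutils_test

import (
	"database/sql"
	"testing"

	"github.com/cockroachdb/cockroach/security"
	"github.com/cockroachdb/cockroach/server"
	"github.com/cockroachdb/cockroach/testutils/sqlutils"
	_ "github.com/lib/pq" // hypothetical Postgres driver registration
)

func TestPGUrlUsage(t *testing.T) {
	s := server.StartTestServer(t)
	defer s.Stop()

	pgURL, cleanup := sqlutils.PGUrl(t, s, security.RootUser, t.TempDir(), "TestPGUrl")
	// cleanup runs after the deferred db.Close below (LIFO), satisfying the
	// requirement that all connections are closed before cleanup is called.
	defer cleanup()

	db, err := sql.Open("postgres", pgURL.String())
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
}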
Example no. 4
// CheckKVs verifies that a KeyValue slice contains the expected keys and values. The values can be
// either integers or strings; the expected results are passed as alternating keys and values, e.g.:
//    CheckKVs(t, kvs, key1, val1, key2, val2)
func CheckKVs(t util.Tester, kvs []KeyValue, expected ...interface{}) {
	expLen := len(expected) / 2
	if expLen != len(kvs) {
		t.Errorf("%s: expected %d scan results, got %d", errInfo(), expLen, len(kvs))
		return
	}
	for i := 0; i < expLen; i++ {
		expKey := expected[2*i].(roachpb.Key)
		if key := kvs[i].Key; !key.Equal(expKey) {
			t.Errorf("%s: expected scan key %d to be %q; got %q", errInfo(), i, expKey, key)
		}
		switch expValue := expected[2*i+1].(type) {
		case int:
			if value, err := kvs[i].Value.GetInt(); err != nil {
				t.Errorf("%s: non-integer scan value %d: %q", errInfo(), i, kvs[i].Value)
			} else if value != int64(expValue) {
				t.Errorf("%s: expected scan value %d to be %d; got %d",
					errInfo(), i, expValue, value)
			}
		case string:
			if value := kvs[i].Value.String(); value != expValue {
				t.Errorf("%s: expected scan value %d to be %s; got %s",
					errInfo(), i, expValue, value)
			}
		default:
			panic(fmt.Sprintf("unsupported type %T", expValue))
		}
	}
}
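For instance, a caller verifying two rows, one integer-valued and one string-valued, passes alternating keys and values. Expected keys must be roachpb.Key, since CheckKVs type-asserts them; the key names and the kvs slice in this sketch are hypothetical, and the helper is written as if it lived alongside CheckKVs.

func checkTwoRows(t util.Tester, kvs []KeyValue) {
	CheckKVs(t, kvs,
		roachpb.Key("a"), 1,     // integer value
		roachpb.Key("b"), "two", // string value
	)
}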
Example no. 5
// Assert drains the Events channel and compares the actual events with those
// expected to have been generated by the operations performed on the nodes in
// the cluster (restart, kill, ...). In the event of a mismatch, the passed
// Tester receives a fatal error.
// Currently, the only events generated (and asserted against) are "die" and
// "restart", to maximize compatibility across different versions of Docker.
func (l *Cluster) Assert(t util.Tester) {
	filter := func(ch chan Event, wait time.Duration) *Event {
		for {
			select {
			case act := <-ch:
				if act.Status != "die" && act.Status != "restart" {
					continue
				}
				return &act
			case <-time.After(wait):
			}
			break
		}
		return nil
	}

	var events []Event
	for {
		exp := filter(l.expectedEvents, time.Duration(0))
		if exp == nil {
			break
		}
		act := filter(l.events, time.Second)
		if act == nil || *exp != *act {
			t.Fatalf("expected event %v, got %v (after %v)", exp, act, events)
		}
		events = append(events, *exp)
	}
	if cur := filter(l.events, time.Duration(0)); cur != nil {
		t.Fatalf("unexpected extra event %v (after %v)", cur, events)
	}
	if log.V(2) {
		log.Infof("asserted %v", events)
	}
}
Example no. 6
// StartTestServer starts an in-memory test server.
// Adds a permissions config for 'TestUser' under prefix 'TestUser'.
func StartTestServer(t util.Tester) *TestServer {
	s := &TestServer{}
	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}

	// Setup permissions for a test user.
	err := s.WritePermissionConfig(TestUser,
		&proto.PermConfig{
			Read:  []string{TestUser},
			Write: []string{TestUser},
		})
	if err != nil {
		if t != nil {
			t.Fatalf("Error adding permissions config for %s: %v", TestUser, err)
		} else {
			log.Fatalf("Error adding permissions config for %s: %v", TestUser, err)
		}
	}

	log.Infof("Test server listening on %s: %s", s.Ctx.RequestScheme(), s.ServingAddr())
	return s
}
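Tests typically pair this helper with a deferred shutdown; a minimal hedged sketch, written as if in the same package as StartTestServer. The Stop method is an assumption here, consistent with the Stop() mentioned in the LocalTestCluster comments below.

func TestWithServer(t *testing.T) {
	s := StartTestServer(t)
	defer s.Stop() // Stop is assumed; only Start is shown above
	// ... issue requests against s.ServingAddr() as TestUser ...
}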
Example no. 7
// StartTestServer starts an in-memory test server.
func StartTestServer(t util.Tester) *TestServer {
	s := &TestServer{}
	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}
	return s
}
Example no. 8
func makeClient(t util.Tester, str string) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()

	db, err := client.Open(stopper, str)

	if err != nil {
		t.Fatal(err)
	}

	return db, stopper
}
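As used by checkRangeReplication above, callers pair the returned stopper with a deferred Stop; a short hedged sketch (the useClient wrapper is hypothetical, the ConnString(0) call mirrors the earlier example):

func useClient(t util.Tester, c *cluster.LocalCluster) {
	db, stopper := makeClient(t, c.ConnString(0))
	defer stopper.Stop()
	_ = db // run KV operations through db while the stopper is alive
}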
Example no. 9
// NewTestHTTPSession constructs a new TestHTTPSession. The session will
// instantiate a client using the given base context. All HTTP requests from
// the session will be sent to the given baseURL.
//
// baseURL should be specified *without* a request scheme (e.g. "http://");
// the request scheme will be taken from the context.
//
// If an error occurs in the HTTP layer during any session operation, a Fatal
// method will be called on the supplied util.Tester.
func NewTestHTTPSession(t util.Tester, ctx *base.Context, baseURL string) *TestHTTPSession {
	client, err := ctx.GetHTTPClient()
	if err != nil {
		t.Fatalf("error creating client: %s", err)
	}
	return &TestHTTPSession{
		t:       t,
		client:  client,
		baseURL: ctx.RequestScheme() + "://" + baseURL,
	}
}
Example no. 10
// StartTestServerWithContext starts an in-memory test server.
// ctx can be nil, in which case a default context will be created.
func StartTestServerWithContext(t util.Tester, ctx *Context) TestServer {
	s := TestServer{Ctx: ctx}
	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}
	return s
}
Example no. 11
// RestrictedCopy creates an on-disk copy of the embedded security asset
// with the provided path. The copy will be created in the provided directory.
// Returns the path of the file.
//
// The file will have restrictive file permissions (0600), making it
// appropriate for usage by libraries that require security assets to have such
// restrictive permissions.
func RestrictedCopy(t util.Tester, path, tempdir, name string) string {
	contents, err := Asset(path)
	if err != nil {
		if t == nil {
			log.Fatal(err)
		} else {
			t.Fatal(err)
		}
	}
	return util.CreateRestrictedFile(t, contents, tempdir, name)
}
Example no. 12
// StartTestServer starts an in-memory test server.
func StartTestServer(t util.Tester) *TestServer {
	s := &TestServer{}
	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}
	log.Infof("Test server listening on %s: %s", s.Ctx.RequestScheme(), s.ServingAddr())
	return s
}
Example no. 13
// CheckKeysInKVs verifies that a KeyValue slice contains the given keys.
func CheckKeysInKVs(t util.Tester, kvs []KeyValue, keys ...string) {
	if len(keys) != len(kvs) {
		t.Errorf("%s: expected %d scan results, got %d", errInfo(), len(keys), len(kvs))
		return
	}
	for i, kv := range kvs {
		expKey := keys[i]
		if key := string(kv.Key); key != keys[i] {
			t.Errorf("%s: expected scan key %d to be %q; got %q", errInfo(), i, expKey, key)
		}
	}
}
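A hedged usage sketch, written as if alongside CheckKeysInKVs; the key names and the kvs slice are hypothetical.

func checkThreeKeys(t util.Tester, kvs []KeyValue) {
	// Asserts that the scan returned exactly the keys "a", "b", "c", in order.
	CheckKeysInKVs(t, kvs, "a", "b", "c")
}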
Example no. 14
// MakeClient creates a DB client for node 'i' using the cluster certs dir.
func (l *LocalCluster) MakeClient(t util.Tester, node int) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()

	db, err := client.Open(stopper, "rpcs://"+security.NodeUser+"@"+
		l.Nodes[node].Addr("").String()+
		"?certs="+l.CertsDir)

	if err != nil {
		t.Fatal(err)
	}

	return db, stopper
}
Example no. 15
// StartInsecureTestServer starts an insecure in-memory test server.
func StartInsecureTestServer(t util.Tester) *TestServer {
	s := &TestServer{Ctx: NewTestContext()}
	s.Ctx.Insecure = true

	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}
	return s
}
Example no. 16
// StartTestServerJoining starts an in-memory test server that attempts to join `other`.
func StartTestServerJoining(t util.Tester, other *TestServer) *TestServer {
	s := &TestServer{Ctx: NewTestContext()}
	s.Ctx.JoinUsing = other.ServingAddr()
	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}
	log.Infof("Node ID: %d", s.Gossip().GetNodeID())
	return s
}
Example no. 17
// StartInsecureTestServer starts an insecure in-memory test server.
func StartInsecureTestServer(t util.Tester) TestServer {
	ctx := MakeTestContext()
	ctx.Insecure = true
	s := TestServer{Ctx: &ctx}

	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}
	return s
}
Example no. 18
// MakeClient returns a client which is pointing at the node with the given
// index. The given integer must be in the range [0,NumNodes()-1].
func (r *RemoteCluster) MakeClient(t util.Tester, i int) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()

	// TODO(tschottdorf,mberhault): TLS all the things!
	db, err := client.Open(stopper, "rpc://"+"root"+"@"+
		util.EnsureHostPort(r.nodes[i].Addr)+
		"?certs="+"certswhocares")

	if err != nil {
		t.Fatal(err)
	}

	return db, stopper
}
Example no. 19
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewRootTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	var err error
	if ltc.DB, err = client.Open("//root@", client.SenderOpt(ltc.Sender)); err != nil {
		t.Fatal(err)
	}
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
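Per the Start/Stop contract described in the comment above, a test built on LocalTestCluster would look roughly like the sketch below. Stop is mentioned in the comment but not shown; treating it as a no-argument method, and the wrapper name, are assumptions.

func runWithLocalTestCluster(t util.Tester) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	defer ltc.Stop()
	// ... use ltc.DB for KV operations against the single in-memory store ...
}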
Example no. 20
// makeDBClientForUser creates a DB client for node 'i' and user 'user'.
func makeDBClientForUser(t util.Tester, lc *LocalCluster, user string, node int) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()

	// We need to run with "InsecureSkipVerify" (set when Certs="" inside the http sender).
	// This is due to the fact that we're running outside docker, so we cannot use a fixed hostname
	// to reach the cluster. This in turn means that we do not have a verified server name in the certs.
	db, err := client.Open(stopper, "rpcs://"+user+"@"+
		lc.Nodes[node].Addr("").String()+
		"?certs="+lc.CertsDir)

	if err != nil {
		t.Fatal(err)
	}

	return db, stopper
}
Example no. 21
// Assert verifies that the cluster state is as expected (i.e. no unexpected
// restarts or node deaths occurred). Tests can call this periodically to
// ascertain cluster health.
// TODO(tschottdorf): unimplemented when nodes are expected down.
func (f *Farmer) Assert(t util.Tester) {
	for _, item := range []struct {
		typ   string
		hosts []string
	}{
		{"cockroach", f.Nodes()},
		{"block_writer", f.Writers()},
	} {
		for i, host := range item.hosts {
			out, _, err := f.execSupervisor(host, "status "+item.typ)
			if err != nil {
				t.Fatal(err)
			}
			if !strings.Contains(out, "RUNNING") {
				t.Fatalf("%s %d (%s) is down:\n%s", item.typ, i, host, out)
			}
		}
	}
}
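As the comment suggests, a long-running test can interleave its workload with periodic health checks and finish by tearing the cluster down; a hedged sketch in which the loop bound, the sleep interval, and the monitorCluster wrapper are arbitrary, while AssertAndStop appears in the last example of this listing.

func monitorCluster(t util.Tester, f *Farmer) {
	for i := 0; i < 10; i++ {
		f.Assert(t) // fails the test if any cockroach or block_writer process is down
		time.Sleep(time.Minute)
	}
	f.AssertAndStop(t)
}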
Example no. 22
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Context, initSender InitSenderFn) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	tracer := tracing.NewTracer()
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(
		context.Background(), rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)

	ltc.Stores = storage.NewStores(ltc.Clock)

	ltc.Sender = initSender(nodeDesc, tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	if ltc.RangeRetryOptions != nil {
		ctx.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	ctx.Ctx = tracing.WithTracer(context.Background(), tracer)
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example no. 23
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {

	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20, ltc.Stopper)

	ltc.localSender = NewLocalSender()
	var rpcSend rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		br, pErr := ltc.localSender.Send(context.Background(), *getArgs(nil).(*proto.BatchRequest))
		if br == nil {
			br = &proto.BatchResponse{}
		}
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(ltc.localSender, br))
		}
		br.Error = pErr
		return []gogoproto.Message{br}, nil
	}
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &defaultRPCRetryOptions,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,         // defined above
		RangeDescriptorDB:        ltc.localSender, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.localSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
Example no. 24
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Context, initSender InitSenderFn) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	tracer := tracing.NewTracer()
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(baseCtx, ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, nil, ltc.Stopper)
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)

	ltc.Stores = storage.NewStores(ltc.Clock)

	ltc.Sender = initSender(nodeDesc, tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	ltc.DB = client.NewDB(ltc.Sender)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ctx.Tracer = tracer
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example no. 25
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {

	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestBootstrap, ltc.Stopper)
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)

	ltc.stores = storage.NewStores(ltc.Clock)
	var rpcSend rpcSendFn = func(_ SendOptions, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message,
		_ *rpc.Context) (proto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		if ltc.Latency > 0 {
			time.Sleep(ltc.Latency)
		}
		br, pErr := ltc.stores.Send(context.Background(), *getArgs(nil).(*roachpb.BatchRequest))
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(ltc.stores, br))
		}
		br.Error = pErr
		return br, nil
	}
	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = ltc.Stopper.ShouldDrain()
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,    // defined above
		RangeDescriptorDB:        ltc.stores, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := storage.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example no. 26
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestBootstrap, ltc.Stopper)
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)

	ltc.stores = storage.NewStores(ltc.Clock)
	tracer := tracing.NewTracer()
	var rpcSend rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (proto.Message, error) {
		if ltc.Latency > 0 {
			time.Sleep(ltc.Latency)
		}
		sp := tracer.StartSpan("node")
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan(context.Background(), sp)
		sp.LogEvent(args.String())
		br, pErr := ltc.stores.Send(ctx, args)
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(ltc.stores, br))
		}
		br.Error = pErr
		if pErr != nil {
			sp.LogEvent("error: " + pErr.String())
		}
		return br, nil
	}
	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = ltc.Stopper.ShouldDrain()
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,    // defined above
		RangeDescriptorDB:        ltc.stores, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, tracer,
		ltc.Stopper, NewTxnMetrics(metric.NewRegistry()))
	ltc.DB = client.NewDB(ltc.Sender)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ctx.Tracer = tracer
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example no. 27
// AssertAndStop performs the same test as Assert but then proceeds to
// dismantle the cluster.
func (f *Farmer) AssertAndStop(t util.Tester) {
	f.Assert(t)
	if err := f.Destroy(); err != nil {
		t.Fatal(err)
	}
}