func createTestClient(addr string) *client.DB {
	db, err := client.Open("https://root@" + addr + "?certs=" + security.EmbeddedCertsDir)
	if err != nil {
		log.Fatal(err)
	}
	return db
}
func (d *roachDriver) Open(dsn string) (driver.Conn, error) {
	db, err := client.Open(dsn)
	if err != nil {
		return nil, err
	}
	u, err := url.Parse(dsn)
	if err != nil {
		return nil, err
	}
	ctx := &base.Context{}
	ctx.InitDefaults()
	if u.User != nil {
		ctx.User = u.User.Username()
	}
	q := u.Query()
	if dir := q["certs"]; len(dir) > 0 {
		ctx.Certs = dir[0]
	}
	sender, err := newHTTPSender(u.Host, ctx, client.DefaultTxnRetryOptions)
	if err != nil {
		return nil, err
	}
	return &conn{db: db, sender: sender}, nil
}
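// A minimal usage sketch (not part of the original code) for the driver above.
// It assumes the driver is registered under the name "cockroach", the name used
// by the sql.Open calls later in this section; the user, address and certs
// directory are illustrative only.
func openSQLHandle(addr string) (*sql.DB, error) {
	dsn := "https://root@" + addr + "?certs=test_certs"
	return sql.Open("cockroach", dsn)
}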
func (d *roachDriver) Open(dsn string) (driver.Conn, error) {
	db, err := client.Open(dsn)
	if err != nil {
		return nil, err
	}
	return &conn{db: db}, nil
}
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewRootTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)

	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	var err error
	if ltc.DB, err = client.Open("//root@", client.SenderOpt(ltc.Sender)); err != nil {
		t.Fatal(err)
	}
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
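// A minimal usage sketch (not part of the original code): start the cluster,
// use ltc.DB for KV access, and stop it when the test completes, as the doc
// comment above describes. Stop() is assumed to release the cluster's stopper;
// the test name and key/value pair are illustrative only.
func TestLocalTestClusterUsage(t *testing.T) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	defer ltc.Stop()
	if err := ltc.DB.Put("test-key", "test-value"); err != nil {
		t.Fatal(err)
	}
}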
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(rootTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	var err error
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
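// A minimal usage sketch (not part of the original code): the helper above
// leaves starting the store to the caller, who also owns the stopper.
// Store.Start is assumed to take the stopper, as in the other snippets in this
// section; the test name is illustrative only.
func TestStoreManualStart(t *testing.T) {
	store, manual, stopper := createTestStoreWithoutStart(t)
	defer stopper.Stop()
	if err := store.Start(stopper); err != nil {
		t.Fatal(err)
	}
	// The manual clock can be used here to control the store clock's time.
	_ = manual
}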
// createTestNode creates an RPC server at the specified address, a gossip
// instance, a KV database, and a node backed by the specified slice of
// engines. The server, clock, node and stopper are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, *hlc.Clock, *Node, *stop.Stopper) {
	var err error
	ctx := storage.StoreContext{}

	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(addr, nodeRPCContext)
	if err := rpcServer.Start(); err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipInterval, testContext.GossipBootstrapResolvers)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS == addr {
			gossipBS = rpcServer.Addr()
		}
		g.SetResolvers([]resolver.Resolver{resolver.NewResolverFromAddress(gossipBS)})
		g.Start(rpcServer, stopper)
	}
	ctx.Gossip = g
	sender := kv.NewDistSender(&kv.DistSenderContext{Clock: ctx.Clock}, g)
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	node := NewNode(ctx)
	return rpcServer, ctx.Clock, node, stopper
}
// BootstrapCluster bootstraps multiple stores using the provided engines and
// cluster ID. The first bootstrapped store contains a single range spanning
// all keys. Initial range lookup metadata is populated for the range.
//
// Returns a KV client for unittest purposes. Caller should close the returned
// client.
func BootstrapCluster(clusterID string, engines []engine.Engine, stopper *stop.Stopper) (*client.DB, error) {
	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	// Create a KV DB with a local sender.
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, ctx.Clock, false, nil, stopper)
	var err error
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		return nil, err
	}
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	for i, eng := range engines {
		sIdent := proto.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   proto.StoreID(i + 1),
		}

		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})

		// Verify the store isn't already part of a cluster.
		if len(s.Ident.ClusterID) > 0 {
			return nil, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}

		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return nil, err
		}

		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data. Only do this if this is the
		// first store.
		if i == 0 {
			if err := s.BootstrapRange(); err != nil {
				return nil, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return nil, err
		}

		lSender.AddStore(s)

		// Initialize node and store ids. Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return nil, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return nil, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return ctx.DB, nil
}
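// A minimal usage sketch (not part of the original code), assuming the
// in-memory engine constructor used elsewhere in this section. The cluster ID
// and engine size are illustrative only; the caller stops the stopper when
// finished with the returned client.
func bootstrapForTest(stopper *stop.Stopper) (*client.DB, error) {
	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	return BootstrapCluster("cluster-test", []engine.Engine{eng}, stopper)
}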
func ExampleDB_Put_insecure() {
	s := &server.TestServer{}
	s.Ctx = server.NewTestContext()
	s.Ctx.Insecure = true
	if pErr := s.Start(); pErr != nil {
		log.Fatalf("Could not start server: %v", pErr)
	}
	defer s.Stop()

	db, err := client.Open(s.Stopper(), "rpc://foo@"+s.ServingAddr())
	if err != nil {
		log.Fatal(err)
	}

	if pErr := db.Put("aa", "1"); pErr != nil {
		panic(pErr)
	}
	result, pErr := db.Get("aa")
	if pErr != nil {
		panic(pErr)
	}
	fmt.Printf("aa=%s\n", result.ValueBytes())

	// Output:
	// aa=1
}
func ExampleDB_Insecure() {
	s := &server.TestServer{}
	s.Ctx = server.NewTestContext()
	s.Ctx.Insecure = true
	if err := s.Start(); err != nil {
		log.Fatalf("Could not start server: %v", err)
	}
	log.Printf("Test server listening on %s: %s", s.Ctx.RequestScheme(), s.ServingAddr())
	defer s.Stop()

	db, err := client.Open("http://root@" + s.ServingAddr())
	if err != nil {
		log.Fatal(err)
	}

	if err := db.Put("aa", "1"); err != nil {
		panic(err)
	}
	result, err := db.Get("aa")
	if err != nil {
		panic(err)
	}
	fmt.Printf("aa=%s\n", result.ValueBytes())

	// Output:
	// aa=1
}
func createTestClientFor(stopper *stop.Stopper, addr, user string) *client.DB {
	db, err := client.Open(stopper, "rpcs://"+user+"@"+addr+"?certs="+security.EmbeddedCertsDir)
	if err != nil {
		log.Fatal(err)
	}
	return db
}
func makeClient(t *testing.T, str string) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()
	db, err := client.Open(stopper, str)
	if err != nil {
		t.Fatal(err)
	}
	return db, stopper
}
func createTestClientForUser(t *testing.T, addr, user string) *client.DB {
	db, err := client.Open(fmt.Sprintf("https://%s@%s?certs=%s",
		user, addr, security.EmbeddedCertsDir))
	if err != nil {
		t.Fatal(err)
	}
	return db
}
func createTestClientForUser(t *testing.T, stopper *stop.Stopper, addr, user string) *client.DB {
	db, err := client.Open(stopper, fmt.Sprintf("rpcs://%s@%s?certs=%s&failfast=1",
		user, addr, security.EmbeddedCertsDir))
	if err != nil {
		t.Fatal(err)
	}
	return db
}
func setup() (*server.TestServer, *client.DB) { s := server.StartTestServer(nil) db, err := client.Open("https://root@" + s.ServingAddr() + "?certs=test_certs") if err != nil { log.Fatal(err) } return s, db }
// createTestNotifyClient creates a new client which connects using an HTTP
// sender to the server at addr. It contains a waitgroup to allow waiting.
func createTestNotifyClient(addr string, priority int) (*client.DB, *notifyingSender) {
	db, err := client.Open(fmt.Sprintf("https://root@%s?certs=%s&priority=%d",
		addr, security.EmbeddedCertsDir, priority))
	if err != nil {
		log.Fatal(err)
	}
	sender := &notifyingSender{wrapped: db.Sender}
	db.Sender = sender
	return db, sender
}
func setup() (*server.TestServer, *client.DB) {
	s := server.StartTestServer(nil)
	db, err := client.Open(s.Stopper(), fmt.Sprintf("rpcs://%s@%s?certs=test_certs",
		security.NodeUser, s.ServingAddr()))
	if err != nil {
		log.Fatal(err)
	}
	return s, db
}
// WritePermissionConfig writes the passed-in 'cfg' permissions config
// for the 'path' key prefix.
func (ts *TestServer) WritePermissionConfig(path string, cfg *proto.PermConfig) error {
	// The testserver is running as "node". However, things like config changes are generally
	// done as root.
	db, err := client.Open(ts.Ctx.RequestScheme() + "://root@" + ts.ServingAddr() + "?certs=test_certs")
	if err != nil {
		return err
	}
	key := keys.MakeKey(keys.ConfigPermissionPrefix, proto.Key(path))
	return db.Put(key, cfg)
}
func makeClient() *client.DB {
	stopper := stop.NewStopper()
	addr := fmt.Sprintf("rpc://[email protected]:9001?certs=certs")
	// Create a database handle.
	db, err := client.Open(stopper, addr)
	if err != nil {
		panic(fmt.Sprintf("open client fail. addr: %v, err: %v\n", addr, err))
	}
	return db
}
// createTestNotifyClient creates a new client which connects using an HTTP
// sender to the server at addr. It contains a waitgroup to allow waiting.
func createTestNotifyClient(stopper *stop.Stopper, addr string, priority roachpb.UserPriority) (*client.DB, *notifyingSender) {
	db, err := client.Open(stopper, fmt.Sprintf("rpcs://%s@%s?certs=%s",
		security.NodeUser, addr, security.EmbeddedCertsDir))
	if err != nil {
		log.Fatal(err)
	}
	sender := &notifyingSender{wrapped: db.GetSender()}
	return client.NewDBWithPriority(sender, priority), sender
}
func makeDBClient() *client.DB {
	// TODO(pmattis): Initialize the user to something more
	// reasonable. Perhaps Context.Addr should be considered a URL.
	db, err := client.Open(Context.RequestScheme() + "://root@" + Context.Addr + "?certs=" + Context.Certs)
	if err != nil {
		fmt.Fprintf(osStderr, "failed to initialize KV client: %s", err)
		osExit(1)
	}
	return db
}
// MakeClient creates a DB client for the given node using the cluster certs dir.
func (l *LocalCluster) MakeClient(t util.Tester, node int) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()
	db, err := client.Open(stopper, "rpcs://"+security.NodeUser+"@"+
		l.Nodes[node].Addr("").String()+
		"?certs="+l.CertsDir)
	if err != nil {
		t.Fatal(err)
	}
	return db, stopper
}
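// A minimal usage sketch (not part of the original code): the caller owns the
// returned stopper, as in the other client helpers in this section. The node
// index and key/value pair are illustrative only.
func putViaLocalCluster(t util.Tester, l *LocalCluster) {
	db, stopper := l.MakeClient(t, 0)
	defer stopper.Stop()
	if err := db.Put("acceptance-key", "value"); err != nil {
		t.Fatal(err)
	}
}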
func setup(t *testing.T) (*server.TestServer, *sql.DB, *client.DB) {
	s := server.StartTestServer(nil)
	sqlDB, err := sql.Open("cockroach", "https://root@"+s.ServingAddr()+"?certs=test_certs")
	if err != nil {
		t.Fatal(err)
	}
	kvDB, err := client.Open("https://root@" + s.ServingAddr() + "?certs=test_certs")
	if err != nil {
		t.Fatal(err)
	}
	return s, sqlDB, kvDB
}
// makeDBClientForUser creates a DB client for the given node and user.
func makeDBClientForUser(t *testing.T, cluster *localcluster.Cluster, user string, node int) *client.DB {
	// We need to run with "InsecureSkipVerify" (set when Certs="" inside the http sender).
	// This is due to the fact that we're running outside docker, so we cannot use a fixed hostname
	// to reach the cluster. This in turn means that we do not have a verified server name in the certs.
	db, err := client.Open("https://" + user + "@" +
		cluster.Nodes[node].Addr("").String() +
		"?certs=" + cluster.CertsDir)
	if err != nil {
		t.Fatal(err)
	}
	return db
}
// MakeClient returns a client pointing at the node with the given index,
// which must be in the range [0, NumNodes()-1].
func (r *RemoteCluster) MakeClient(t util.Tester, i int) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()
	// TODO(tschottdorf,mberhault): TLS all the things!
	db, err := client.Open(stopper, "rpc://"+"root"+"@"+
		util.EnsureHostPort(r.nodes[i].Addr)+
		"?certs="+"certswhocares")
	if err != nil {
		t.Fatal(err)
	}
	return db, stopper
}
func makeDBClient() *client.DB {
	// TODO(marc): KV endpoints are now restricted to node users.
	// This should probably be made more explicit.
	db, err := client.Open(fmt.Sprintf("%s://%s@%s?certs=%s",
		context.RequestScheme(), security.NodeUser, context.Addr, context.Certs))
	if err != nil {
		fmt.Fprintf(osStderr, "failed to initialize KV client: %s\n", err)
		osExit(1)
	}
	return db
}
func makeDBClient() *client.DB {
	// TODO(pmattis): Initialize the user to something more
	// reasonable. Perhaps Context.Addr should be considered a URL.
	db, err := client.Open(fmt.Sprintf("%s://%s@%s?certs=%s",
		context.RequestScheme(), security.RootUser, context.Addr, context.Certs))
	if err != nil {
		fmt.Fprintf(osStderr, "failed to initialize KV client: %s\n", err)
		osExit(1)
	}
	return db
}
func main() {
	stopper := stop.NewStopper()
	defer stopper.Stop()

	fmt.Printf("A simple program that keeps moving money between bank accounts.\n\n")
	flag.Parse()
	if *numAccounts < 2 {
		fmt.Fprintf(os.Stderr, "At least two accounts are required to transfer money.\n")
		os.Exit(1)
	}
	if *numParallelTransfers < 1 {
		fmt.Fprintf(os.Stderr, "At least one transfer routine must be active.\n")
		os.Exit(1)
	}
	if !*useTransaction {
		fmt.Printf("Use of transactions has been disabled.\n")
	}
	// Initialize the bank.
	var bank Bank
	bank.firstAccount = *firstAccount
	bank.numAccounts = *numAccounts
	if *dbName == "" {
		// Run a test cockroach instance to represent the bank.
		security.SetReadFileFn(securitytest.Asset)
		serv := server.StartTestServer(nil)
		defer serv.Stop()
		*dbName = fmt.Sprintf("rpcs://%s@%s?certs=test_certs",
			security.NodeUser, serv.ServingAddr())
	}
	// Create a database handle.
	db, err := client.Open(stopper, *dbName)
	if err != nil {
		log.Fatal(err)
	}
	bank.db = db
	// Initialize all the bank accounts.
	const initCash = 1000
	totalCash := bank.initBankAccounts(initCash)
	// Start all the money transfer routines.
	for i := 0; i < *numParallelTransfers; i++ {
		// Keep transferring up to 10% of initCash between accounts.
		go bank.continuouslyTransferMoney(initCash / 10)
	}
	go bank.periodicallyReportStats()
	bank.periodicallyCheckBalances(totalCash)
}
// makeDBClientForUser creates a DB client for the given node and user.
func makeDBClientForUser(t util.Tester, lc *LocalCluster, user string, node int) (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()
	// We need to run with "InsecureSkipVerify" (set when Certs="" inside the http sender).
	// This is due to the fact that we're running outside docker, so we cannot use a fixed hostname
	// to reach the cluster. This in turn means that we do not have a verified server name in the certs.
	db, err := client.Open(stopper, "rpcs://"+user+"@"+
		lc.Nodes[node].Addr("").String()+
		"?certs="+lc.CertsDir)
	if err != nil {
		t.Fatal(err)
	}
	return db, stopper
}
func makeDBClient() (*client.DB, *stop.Stopper) {
	stopper := stop.NewStopper()
	// TODO(marc): KV endpoints are now restricted to node users.
	// This should probably be made more explicit.
	db, err := client.Open(stopper, fmt.Sprintf("%s://%s@%s?certs=%s",
		cliContext.RPCRequestScheme(), security.NodeUser, cliContext.Addr, cliContext.Certs))
	if err != nil {
		stopper.Stop()
		panicf("failed to initialize KV client: %s", err)
	}
	return db, stopper
}
func setup(t *testing.T) (*server.TestServer, *sql.DB, *client.DB) {
	s := setupTestServer(t)
	// SQL requests use "root" which has ALL permissions on everything.
	sqlDB, err := sql.Open("cockroach", fmt.Sprintf("https://%s@%s?certs=test_certs",
		security.RootUser, s.ServingAddr()))
	if err != nil {
		t.Fatal(err)
	}
	// All KV requests need "node" certs.
	kvDB, err := client.Open(fmt.Sprintf("https://%s@%s?certs=test_certs",
		security.NodeUser, s.ServingAddr()))
	if err != nil {
		t.Fatal(err)
	}
	return s, sqlDB, kvDB
}