Example #1
func (c *cluster) isReplicated() (bool, string) {
	db := c.clients[0]
	rows, err := db.Scan(keys.Meta2Prefix, keys.Meta2Prefix.PrefixEnd(), 100000)
	if err != nil {
		log.Fatalf(context.Background(), "scan failed: %s\n", err)
	}

	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 2, 1, 2, ' ', 0)

	done := true
	for _, row := range rows {
		desc := &roachpb.RangeDescriptor{}
		if err := row.ValueProto(desc); err != nil {
			log.Fatalf(context.Background(), "%s: unable to unmarshal range descriptor\n", row.Key)
		}
		var storeIDs []roachpb.StoreID
		for _, replica := range desc.Replicas {
			storeIDs = append(storeIDs, replica.StoreID)
		}
		fmt.Fprintf(tw, "\t%s\t%s\t[%d]\t%d\n",
			desc.StartKey, desc.EndKey, desc.RangeID, storeIDs)
		if len(desc.Replicas) != 3 {
			done = false
		}
	}
	_ = tw.Flush()
	return done, buf.String()
}
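
A hypothetical call site for the helper above; the polling cadence and log wording here are assumptions for illustration, not part of the original:

// Poll until every range reports a full complement of three replicas.
for {
	done, detail := c.isReplicated()
	if done {
		break
	}
	log.Infof(context.Background(), "waiting for full replication:\n%s", detail)
	time.Sleep(time.Second)
}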
Example #2
// StartTestServer starts an in-memory test server.
// Adds a permissions config for 'TestUser' under prefix 'TestUser'.
func StartTestServer(t util.Tester) *TestServer {
	s := &TestServer{}
	if err := s.Start(); err != nil {
		if t != nil {
			t.Fatalf("Could not start server: %v", err)
		} else {
			log.Fatalf("Could not start server: %v", err)
		}
	}

	// Setup permissions for a test user.
	err := s.WritePermissionConfig(TestUser,
		&proto.PermConfig{
			Read:  []string{TestUser},
			Write: []string{TestUser},
		})
	if err != nil {
		if t != nil {
			t.Fatalf("Error adding permissions config for %s: %v", TestUser, err)
		} else {
			log.Fatalf("Error adding permissions config for %s: %v", TestUser, err)
		}
	}

	log.Infof("Test server listening on %s: %s", s.Ctx.RequestScheme(), s.ServingAddr())
	return s
}
Example #3
// connectGossip connects to the gossip network and reads the cluster ID. If
// this node is already part of a cluster, the cluster ID is verified
// for a match. If not part of a cluster, the cluster ID is set. The
// node's address is gossiped with the node ID as the gossip key.
func (n *Node) connectGossip() {
	log.Infof("connecting to gossip network to verify cluster ID...")
	<-n.gossip.Connected

	val, err := n.gossip.GetInfo(gossip.KeyClusterID)
	if err != nil || val == nil {
		log.Fatalf("unable to ascertain cluster ID from gossip network: %v", err)
	}
	gossipClusterID := val.(string)

	if n.ClusterID == "" {
		n.ClusterID = gossipClusterID
	} else if n.ClusterID != gossipClusterID {
		log.Fatalf("node %d belongs to cluster %q but is attempting to connect to a gossip network for cluster %q",
			n.Descriptor.NodeID, n.ClusterID, gossipClusterID)
	}
	log.Infof("node connected via gossip and verified as part of cluster %q", gossipClusterID)

	// Gossip node address keyed by node ID.
	if n.Descriptor.NodeID != 0 {
		nodeIDKey := gossip.MakeNodeIDGossipKey(n.Descriptor.NodeID)
		if err := n.gossip.AddInfo(nodeIDKey, n.Descriptor.Address, ttlNodeIDGossip); err != nil {
			log.Errorf("couldn't gossip address for node %d: %v", n.Descriptor.NodeID, err)
		}
	}
}
Example #4
// ExampleNewClock shows how to create a new
// hybrid logical clock based on the local machine's
// physical clock. The sanity checks in this example
// will, of course, not fail and the output will be
// the age of the Unix epoch in nanoseconds.
func ExampleNewClock() {
	// Initialize a new clock, using the local
	// physical clock.
	c := NewClock(UnixNano)
	// Update the state of the hybrid clock.
	s := c.Now()
	time.Sleep(50 * time.Nanosecond)
	t := proto.Timestamp{WallTime: UnixNano()}
	// The sanity checks below will usually never be triggered.

	// Timestamp implements the util.Ordered interface.
	if !s.Less(t) {
		log.Fatalf("The later timestamp is not larger than the earlier one")
	}

	if t.WallTime < s.WallTime {
		log.Fatalf("HLC timestamp %d deviates from physical clock %d", s.WallTime, t.WallTime)
	}

	if s.Logical > 0 {
		log.Fatalf("Trivial timestamp has logical component")
	}

	fmt.Printf("The Unix Epoch is now approximately %dns old.\n", t.WallTime)
}
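
For readers unfamiliar with the comparison these checks rely on, here is a self-contained sketch (a hypothetical stand-in, not the hlc package itself) of how hybrid logical timestamps order: wall time first, with the logical counter as a tiebreaker.

// hlcTimestamp mirrors the WallTime/Logical fields used above.
type hlcTimestamp struct {
	WallTime int64
	Logical  int32
}

// Less orders by physical wall time, then by the logical counter.
func (a hlcTimestamp) Less(b hlcTimestamp) bool {
	return a.WallTime < b.WallTime ||
		(a.WallTime == b.WallTime && a.Logical < b.Logical)
}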
Example #5
func (z *zeroSum) verify(d time.Duration) {
	for {
		time.Sleep(d)

		// Grab the count of accounts from committed transactions first. The number
		// of accounts found by the SELECT should be at least this number.
		committedAccounts := uint64(z.accountsLen())

		q := `SELECT count(*), sum(balance) FROM accounts`
		var accounts uint64
		var total int64
		db := z.db[z.randNode(rand.Intn)]
		if err := db.QueryRow(q).Scan(&accounts, &total); err != nil {
			z.maybeLogError(err)
			continue
		}
		if total != 0 {
			log.Fatalf(context.Background(), "unexpected total balance %d", total)
		}
		if accounts < committedAccounts {
			log.Fatalf(context.Background(), "expected at least %d accounts, but found %d",
				committedAccounts, accounts)
		}
	}
}
Example #6
// initNodeID updates the internal NodeDescriptor with the given ID. If zero is
// supplied, a new NodeID is allocated with the first invocation. For all other
// values, the supplied ID is stored into the descriptor (unless one has been
// set previously, in which case a fatal error occurs).
//
// Upon setting a new NodeID, the descriptor is gossiped and the NodeID is
// stored into the gossip instance.
func (n *Node) initNodeID(id roachpb.NodeID) {
	if id < 0 {
		log.Fatalf("NodeID must not be negative")
	}

	if o := n.Descriptor.NodeID; o > 0 {
		if id == 0 {
			return
		}
		log.Fatalf("cannot initialize NodeID to %d, already have %d", id, o)
	}
	var err error
	if id == 0 {
		id, err = allocateNodeID(n.ctx.DB)
		if err != nil {
			log.Fatal(err)
		}
		if id == 0 {
			log.Fatal("new node allocated illegal ID 0")
		}
		log.Infof("new node allocated ID %d", id)
		n.ctx.Gossip.SetNodeID(id)
	} else {
		log.Infof("node ID %d initialized", id)
	}
	// Gossip the node descriptor to make this node addressable by node ID.
	n.Descriptor.NodeID = id
	if err = n.ctx.Gossip.SetNodeDescriptor(&n.Descriptor); err != nil {
		log.Fatalf("couldn't gossip descriptor for node %d: %s", n.Descriptor.NodeID, err)
	}
}
Example #7
// continuouslyTransferMoney() keeps moving random amounts between
// random accounts.
func (bank *Bank) continuouslyTransferMoney(cash int64) {
	for {
		from := bank.makeAccountID(rand.Intn(bank.numAccounts))
		to := bank.makeAccountID(rand.Intn(bank.numAccounts))
		// Continue when from == to.
		if bytes.Equal(from, to) {
			continue
		}
		exchangeAmount := rand.Int63n(cash)
		// transferMoney transfers exchangeAmount between the two accounts.
		transferMoney := func(runner client.Runner) *roachpb.Error {
			batchRead := &client.Batch{}
			batchRead.Get(from)
			batchRead.Get(to)
			if err := runner.Run(batchRead); err != nil {
				return err
			}
			// Read from value.
			fromAccount := &Account{}
			if err := fromAccount.decode(batchRead.Results[0].Rows[0].ValueBytes()); err != nil {
				log.Fatalf("decoding error: %s", err)
			}
			// Ensure there is enough cash.
			if fromAccount.Balance < exchangeAmount {
				return nil
			}
			// Read to value.
			toAccount := &Account{}
			if err := toAccount.decode(batchRead.Results[1].Rows[0].ValueBytes()); err != nil {
				log.Fatalf("decoding error: %s", err)
			}

			// Update both accounts.
			batchWrite := &client.Batch{}
			fromAccount.Balance -= exchangeAmount
			toAccount.Balance += exchangeAmount
			if fromValue, err := fromAccount.encode(); err != nil {
				log.Fatalf("encoding error: %s", err)
			} else if toValue, err := toAccount.encode(); err != nil {
				log.Fatalf("encoding error: %s", err)
			} else {
				batchWrite.Put(from, fromValue)
				batchWrite.Put(to, toValue)
			}
			return runner.Run(batchWrite)
		}
		var err *roachpb.Error
		if *useTransaction {
			err = bank.db.Txn(func(txn *client.Txn) *roachpb.Error { return transferMoney(txn) })
		} else {
			err = transferMoney(bank.db)
		}
		if err != nil {
			log.Fatal(err)
		}
		atomic.AddInt32(&bank.numTransfers, 1)
	}
}
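
The Account type and its encode/decode helpers are used above but not shown. A minimal stand-in (hypothetical and JSON-based; the original codec may differ, and this requires "encoding/json") would be:

type Account struct {
	Balance int64
}

func (a *Account) decode(b []byte) error {
	return json.Unmarshal(b, a)
}

func (a *Account) encode() ([]byte, error) {
	return json.Marshal(a)
}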
Example #8
func (n *node) start() {
	n.Lock()
	defer n.Unlock()

	if n.Cmd != nil {
		return
	}

	n.Cmd = exec.Command(n.Args[0], n.Args[1:]...)

	stdoutPath := filepath.Join(n.LogDir, "stdout")
	stdout, err := os.OpenFile(stdoutPath,
		os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf(context.Background(), "unable to open file %s: %s", stdoutPath, err)
	}
	n.Cmd.Stdout = stdout

	stderrPath := filepath.Join(n.LogDir, "stderr")
	stderr, err := os.OpenFile(stderrPath,
		os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf(context.Background(), "unable to open file %s: %s", stderrPath, err)
	}
	n.Cmd.Stderr = stderr

	err = n.Cmd.Start()
	if n.Cmd.Process != nil {
		log.Infof(context.Background(), "process %d started: %s",
			n.Cmd.Process.Pid, strings.Join(n.Args, " "))
	}
	if err != nil {
		log.Infof(context.Background(), "%v", err)
		_ = stdout.Close()
		_ = stderr.Close()
		return
	}

	go func(cmd *exec.Cmd) {
		if err := cmd.Wait(); err != nil {
			log.Errorf(context.Background(), "waiting for command: %v", err)
		}
		_ = stdout.Close()
		_ = stderr.Close()

		ps := cmd.ProcessState
		sy := ps.Sys().(syscall.WaitStatus)

		log.Infof(context.Background(), "Process %d exited with status %d",
			ps.Pid(), sy.ExitStatus())
		log.Infof(context.Background(), "%s", ps)

		n.Lock()
		n.Cmd = nil
		n.Unlock()
	}(n.Cmd)
}
Example #9
func main() {
	// Seed the random number generator for non-determinism across
	// multiple runs.
	randutil.SeedForTests()

	if f := flag.Lookup("alsologtostderr"); f != nil {
		fmt.Println("Starting simulation. Add -alsologtostderr to see progress.")
	}
	flag.Parse()

	dirName, err := ioutil.TempDir("", "gossip-simulation-")
	if err != nil {
		log.Fatalf(context.TODO(), "could not create temporary directory for gossip simulation output: %s", err)
	}

	// Simulation callbacks to run the simulation for cycleCount
	// cycles. At each cycle % outputEvery, a dot file showing the
	// state of the network graph is output.
	nodeCount := 3
	switch *size {
	case "tiny":
		// Use default parameters.
	case "small":
		nodeCount = 10
	case "medium":
		nodeCount = 25
	case "large":
		nodeCount = 50
	case "huge":
		nodeCount = 100
	case "ginormous":
		nodeCount = 250
	default:
		log.Fatalf(context.TODO(), "unknown simulation size: %s", *size)
	}

	edgeSet := make(map[string]edge)

	stopper := stop.NewStopper()
	defer stopper.Stop()

	n := simulation.NewNetwork(stopper, nodeCount, true)
	n.SimulateNetwork(
		func(cycle int, network *simulation.Network) bool {
			// Output dot graph.
			dotFN := fmt.Sprintf("%s/sim-cycle-%03d.dot", dirName, cycle)
			_, quiescent := outputDotFile(dotFN, cycle, network, edgeSet)
			// Run until network has quiesced.
			return !quiescent
		},
	)

	// Output instructions for viewing graphs.
	fmt.Printf("To view simulation graph output run (you must install graphviz):\n\nfor f in %s/*.dot ; do circo $f -Tpng -o $f.png ; echo $f.png ; done\n", dirName)
}
Example #10
// createTestConfigFile creates a temporary file and writes the
// testConfig yaml data to it. The caller is responsible for
// removing it. Returns the filename for a subsequent call to
// os.Remove().
func createTestConfigFile(body string) string {
	f, err := ioutil.TempFile("", "test-config")
	if err != nil {
		log.Fatalf("failed to open temporary file: %v", err)
	}
	defer f.Close()
	if _, err = f.Write([]byte(body)); err != nil {
		log.Fatalf("failed to write to temporary file: %v", err)
	}
	return f.Name()
}
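
A hypothetical caller, honoring the cleanup contract stated in the doc comment:

func withTestConfig() {
	fname := createTestConfigFile("key: value\n")
	defer os.Remove(fname)
	// ... read the config from fname ...
}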
Example #11
// RegisterSender registers the specified function to be used for
// creation of a new sender when the specified scheme is encountered.
func RegisterSender(scheme string, f NewSenderFunc) {
	if f == nil {
		log.Fatalf("unable to register nil function for \"%s\"", scheme)
	}
	sendersMu.Lock()
	defer sendersMu.Unlock()
	if _, ok := senders[scheme]; ok {
		log.Fatalf("sender already registered for \"%s\"", scheme)
	}
	senders[scheme] = f
}
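
The lookup side of this registry is not shown in the excerpt. A sketch of what it presumably looks like (the name newSender is an assumption, as is NewSenderFunc returning a Sender given an address):

func newSender(scheme, addr string) (Sender, error) {
	sendersMu.Lock()
	f, ok := senders[scheme]
	sendersMu.Unlock()
	if !ok {
		return nil, fmt.Errorf("no sender registered for %q", scheme)
	}
	return f(addr), nil
}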
Example #12
// tempUnixFile creates a temporary file for use with a unix domain socket.
// TODO(bdarnell): use TempDir instead to make this atomic.
func tempUnixFile() string {
	f, err := ioutil.TempFile("", "unix-socket")
	if err != nil {
		log.Fatalf("unable to create temp file: %s", err)
	}
	f.Close()
	if err := os.Remove(f.Name()); err != nil {
		log.Fatalf("unable to remove temp file: %s", err)
	}
	return f.Name()
}
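
The TODO could be resolved along these lines: a fresh temporary directory yields an unused socket path with no create-then-remove race. A sketch (requires "path/filepath"; the caller becomes responsible for removing the directory):

func tempUnixFileAtomic() string {
	dir, err := ioutil.TempDir("", "unix-socket")
	if err != nil {
		log.Fatalf("unable to create temp dir: %s", err)
	}
	return filepath.Join(dir, "socket")
}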
Example #13
// ExampleManualClock shows how a manual clock can be
// used as a physical clock. This is useful for testing.
func ExampleManualClock() {
	var m ManualClock = 10
	c := NewClock(m.UnixNano)
	c.Now()
	if c.Timestamp().WallTime != 10 {
		log.Fatalf("manual clock error")
	}
	m = 20
	c.Now()
	if c.Timestamp().WallTime != 20 {
		log.Fatalf("manual clock error")
	}
}
Example #14
// ExampleManualClock shows how a manual clock can be
// used as a physical clock. This is useful for testing.
func ExampleManualClock() {
	m := NewManualClock(10)
	c := NewClock(m.UnixNano)
	c.Now()
	if c.Timestamp().WallTime != 10 {
		log.Fatalf("manual clock error")
	}
	m.Set(20)
	c.Now()
	if c.Timestamp().WallTime != 20 {
		log.Fatalf("manual clock error")
	}
}
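
Building on the manual clock, a sketch (assuming the same package API as the examples above) of the property that makes the clock "hybrid": with wall time frozen, successive readings still advance through the logical component.

func ExampleManualClock_logicalTicks() {
	m := NewManualClock(10)
	c := NewClock(m.UnixNano)
	a := c.Now()
	b := c.Now() // wall time unchanged, so the logical counter must advance
	if !a.Less(b) {
		log.Fatalf("expected strictly increasing timestamps, got %v then %v", a, b)
	}
	if b.WallTime != 10 {
		log.Fatalf("manual clock error")
	}
}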
Example #15
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(nodeCount int, createResolvers bool) *Network {
	log.Infof(context.TODO(), "simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stop.NewStopper(),
	}
	n.rpcContext = rpc.NewContext(&base.Context{Insecure: true}, nil, n.Stopper)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(context.TODO(), err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(context.TODO(), err)
		}
		// Build a resolver for each instance or we'll get data races.
		if createResolvers {
			r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr())
			if err != nil {
				log.Fatalf(context.TODO(), "bad gossip address %s: %s", n.Nodes[0].Addr(), err)
			}
			node.Gossip.SetResolvers([]resolver.Resolver{r})
		}
	}
	return n
}
Example #16
func (c *cluster) freeze(nodeIdx int, freeze bool) {
	addr := c.rpcAddr(nodeIdx)
	conn, err := c.rpcCtx.GRPCDial(addr)
	if err != nil {
		log.Fatalf(context.Background(), "unable to dial: %s: %v", addr, err)
	}

	adminClient := serverpb.NewAdminClient(conn)
	stream, err := adminClient.ClusterFreeze(
		context.Background(), &serverpb.ClusterFreezeRequest{Freeze: freeze})
	if err != nil {
		log.Fatal(context.Background(), err)
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(context.Background(), err)
		}
		fmt.Println(resp.Message)
	}
	fmt.Println("ok")
}
Example #17
// CreateLocal creates a new local cockroach cluster. The stopper channel is
// used to trigger a graceful shutdown (e.g. when a signal arrives). The
// cluster must be started before being used.
func CreateLocal(numLocal, numStores int, logDir string, stopper chan struct{}) *LocalCluster {
	select {
	case <-stopper:
		// The stopper was already closed, exit early.
		os.Exit(1)
	default:
	}

	if *cockroachImage == builderImage && !exists(*cockroachBinary) {
		log.Fatalf("\"%s\": does not exist", *cockroachBinary)
	}

	cli, err := dockerclient.NewEnvClient()
	maybePanic(err)

	return &LocalCluster{
		client:    cli,
		stopper:   stopper,
		numLocal:  numLocal,
		numStores: numStores,
		// TODO(tschottdorf): deadlocks will occur if these channels fill up.
		events:         make(chan Event, 1000),
		expectedEvents: make(chan Event, 1000),
		logDir:         logDir,
	}
}
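
A hypothetical way to wire the stopper channel to a signal, as the doc comment suggests (the hookup shown here is an assumption; it requires "os" and "os/signal"):

stopper := make(chan struct{})
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt)
go func() {
	<-sig
	close(stopper)
}()
l := CreateLocal(3, 1, "/tmp/acceptance-logs", stopper)
_ = l // start and use the cluster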
Example #18
// start starts the node by registering the storage instance for the
// RPC service "Node" and initializing stores for each specified
// engine. Launches periodic store gossiping in a goroutine.
func (n *Node) start(rpcServer *rpc.Server, engines []engine.Engine,
	attrs proto.Attributes, stopper *util.Stopper) error {
	n.initDescriptor(rpcServer.Addr(), attrs)
	if err := rpcServer.RegisterName("Node", (*nodeServer)(n)); err != nil {
		log.Fatalf("unable to register node service with RPC server: %s", err)
	}

	// Start status monitor.
	n.status.StartMonitorFeed(n.ctx.EventFeed)
	stopper.AddCloser(n.ctx.EventFeed)

	// Initialize stores, including bootstrapping new ones.
	if err := n.initStores(engines, stopper); err != nil {
		return err
	}

	// Pass NodeID to status monitor - this value is initialized in initStores,
	// but the StatusMonitor must be active before initStores.
	n.status.SetNodeID(n.Descriptor.NodeID)

	// Initialize publisher for Node Events.
	n.feed = status.NewNodeEventFeed(n.Descriptor.NodeID, n.ctx.EventFeed)

	n.startedAt = n.ctx.Clock.Now().WallTime
	n.startStoresScanner(stopper)
	n.startPublishStatuses(stopper)
	n.startGossip(stopper)
	log.Infoc(n.context(), "Started node with %v engine(s) and attributes %v", engines, attrs.Attrs)
	return nil
}
Example #19
// strictErrorLog logs a fatal error in strict mode and a non-fatal error
// otherwise. Arguments are printf-style and will be passed directly to
// either log.Fatalf or log.Errorf.
func (m *MultiRaft) strictErrorLog(format string, args ...interface{}) {
	if m.Strict {
		log.Fatalf(format, args...)
	} else {
		log.Errorf(format, args...)
	}
}
Example #20
// getNextBootstrapAddress returns the next available bootstrap
// address by consulting the first non-exhausted resolver from the
// slice supplied to the constructor or set using setBootstrap().
// The lock is assumed held.
func (g *Gossip) getNextBootstrapAddress() net.Addr {
	if len(g.resolvers) == 0 {
		log.Fatalf("no resolvers specified for gossip network")
	}

	// Run through resolvers round robin starting at last resolved index.
	for i := 0; i < len(g.resolvers); i++ {
		g.resolverIdx = (g.resolverIdx + 1) % len(g.resolvers)
		if g.resolverIdx == len(g.resolvers)-1 {
			g.triedAll = true
		}
		resolver := g.resolvers[g.resolverIdx]
		addr, err := resolver.GetAddress()
		if err != nil {
			log.Errorf("invalid bootstrap address: %+v, %v", resolver, err)
			continue
		} else if addr.String() == g.is.NodeAddr.String() {
			// Skip our own node address.
			continue
		}
		_, addrActive := g.bootstrapping[addr.String()]
		if !resolver.IsExhausted() || !addrActive {
			g.bootstrapping[addr.String()] = struct{}{}
			return addr
		}
	}

	return nil
}
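
The Resolver interface is not part of this excerpt; from the calls made above it must provide at least the following (a reconstruction for illustration, not the verbatim definition):

type Resolver interface {
	// GetAddress returns the next candidate address for bootstrapping.
	GetAddress() (net.Addr, error)
	// IsExhausted reports whether the resolver has no fresh addresses left.
	IsExhausted() bool
}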
Example #21
// CreateLocal creates a new local cockroach cluster. The stopper channel is
// used to trigger a graceful shutdown (e.g. when a signal arrives). The
// cluster must be started before being used.
func CreateLocal(cfg TestConfig, logDir string, privileged bool, stopper chan struct{}) *LocalCluster {
	select {
	case <-stopper:
		// The stopper was already closed, exit early.
		os.Exit(1)
	default:
	}

	if *cockroachImage == builderImageFull && !exists(*cockroachBinary) {
		log.Fatalf("\"%s\": does not exist", *cockroachBinary)
	}

	cli, err := client.NewEnvClient()
	maybePanic(err)

	retryingClient := retryingDockerClient{
		resilientDockerClient: resilientDockerClient{APIClient: cli},
		attempts:              10,
		timeout:               10 * time.Second,
	}

	return &LocalCluster{
		client:  retryingClient,
		stopper: stopper,
		config:  cfg,
		// TODO(tschottdorf): deadlocks will occur if these channels fill up.
		events:         make(chan Event, 1000),
		expectedEvents: make(chan Event, 1000),
		logDir:         logDir,
		privileged:     privileged,
	}
}
Example #22
func (lt *localRPCTransport) Listen(id proto.RaftNodeID, server ServerInterface) error {
	addr := util.CreateTestAddr("tcp")
	rpcServer := crpc.NewServer(addr, &crpc.Context{
		Context: base.Context{
			Insecure: true,
		},
		Stopper:      lt.stopper,
		DisableCache: true,
	})
	err := rpcServer.RegisterAsync(raftMessageName,
		func(argsI gogoproto.Message, callback func(gogoproto.Message, error)) {
			protoArgs := argsI.(*proto.RaftMessageRequest)
			args := RaftMessageRequest{
				GroupID: protoArgs.GroupID,
			}
			if err := args.Message.Unmarshal(protoArgs.Msg); err != nil {
				callback(nil, err)
				return
			}
			err := server.RaftMessage(&args, &RaftMessageResponse{})
			callback(&proto.RaftMessageResponse{}, err)
		}, &proto.RaftMessageRequest{})
	if err != nil {
		return err
	}

	lt.mu.Lock()
	if _, ok := lt.servers[id]; ok {
		log.Fatalf("node %d already listening", id)
	}
	lt.servers[id] = rpcServer
	lt.mu.Unlock()

	return rpcServer.Start()
}
Example #23
func Example_node() {
	c := newCLITest()
	defer c.stop()

	// Refresh time series data, which is required to retrieve stats.
	if err := c.TestServer.WriteSummaries(); err != nil {
		log.Fatalf(context.Background(), "Couldn't write stats summaries: %s", err)
	}

	c.Run("node ls")
	c.Run("node ls --pretty")
	c.Run("node status 10000")

	// Output:
	// node ls
	// 1 row
	// id
	// 1
	// node ls --pretty
	// +----+
	// | id |
	// +----+
	// |  1 |
	// +----+
	// (1 row)
	// node status 10000
	// Error: node 10000 doesn't exist
}
Example #24
// Start runs the RPC server. After this method returns, the socket
// will have been bound. Use Server.Addr() to ascertain the server's address.
func (s *Server) Start() error {
	ln, err := net.Listen(s.addr.Network(), s.addr.String())
	if err != nil {
		return err
	}
	s.listener = ln

	s.mu.Lock()
	s.addr = ln.Addr()
	s.mu.Unlock()

	go func() {
		// Start serving in a loop until listener is closed.
		log.Infof("serving on %+v...", s.Addr())
		for {
			conn, err := ln.Accept()
			if err != nil {
				s.mu.Lock()
				if !s.closed {
					log.Fatalf("server terminated: %s", err)
				}
				s.mu.Unlock()
				break
			}
			// Serve connection to completion in a goroutine.
			go s.serveConn(conn)
		}
		log.Infof("done serving on %+v", s.Addr())
	}()
	return nil
}
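
The closed flag is what lets the accept loop distinguish shutdown from genuine failure. A plausible Close counterpart (a sketch, not the actual implementation) sets it before closing the listener:

func (s *Server) Close() {
	s.mu.Lock()
	s.closed = true
	s.mu.Unlock()
	// Closing the listener makes ln.Accept return an error, which the
	// serving goroutine above now treats as a clean termination.
	_ = s.listener.Close()
}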
Example #25
func ExampleDB_Put_insecure() {
	s := &server.TestServer{}
	s.Ctx = server.NewTestContext()
	s.Ctx.Insecure = true
	if pErr := s.Start(); pErr != nil {
		log.Fatalf("Could not start server: %v", pErr)
	}
	defer s.Stop()

	db, err := client.Open(s.Stopper(), "rpc://foo@"+s.ServingAddr())
	if err != nil {
		log.Fatal(err)
	}

	if pErr := db.Put("aa", "1"); pErr != nil {
		panic(pErr)
	}
	result, pErr := db.Get("aa")
	if pErr != nil {
		panic(pErr)
	}
	fmt.Printf("aa=%s\n", result.ValueBytes())

	// Output:
	// aa=1
}
Example #26
func (lt *localRPCTransport) Listen(id roachpb.StoreID, server ServerInterface) error {
	addr := util.CreateTestAddr("tcp")
	rpcServer := crpc.NewServer(addr, &crpc.Context{
		Context: base.Context{
			Insecure: true,
		},
		Stopper:      lt.stopper,
		DisableCache: true,
	})
	err := rpcServer.RegisterAsync(raftMessageName, false, /*not public*/
		func(argsI proto.Message, callback func(proto.Message, error)) {
			args := argsI.(*RaftMessageRequest)
			resp, err := server.RaftMessage(args)
			callback(resp, err)
		}, &RaftMessageRequest{})
	if err != nil {
		return err
	}

	lt.mu.Lock()
	if _, ok := lt.servers[id]; ok {
		log.Fatalf("node %d already listening", id)
	}
	lt.servers[id] = rpcServer
	lt.mu.Unlock()

	return rpcServer.Start()
}
Example #27
// runExterminate destroys the data held in the specified stores.
func runExterminate(cmd *cobra.Command, args []string) {
	err := Context.Init("exterminate")
	if err != nil {
		log.Errorf("failed to initialize context: %s", err)
		return
	}

	// First attempt to shut down the server. Note that an error of EOF just
	// means the HTTP server shut down before the request to quit returned.
	if err := server.SendQuit(Context); err != nil {
		log.Infof("shutdown node %s: %s", Context.Addr, err)
	} else {
		log.Infof("shutdown node in anticipation of data extermination")
	}

	// Exterminate all data held in specified stores.
	for _, e := range Context.Engines {
		if rocksdb, ok := e.(*engine.RocksDB); ok {
			log.Infof("exterminating data from store %s", e)
			if err := rocksdb.Destroy(); err != nil {
				log.Fatalf("unable to destroy store %s: %s", e, err)
			}
		}
	}
	log.Infof("exterminated all data from stores %s", Context.Engines)
}
Example #28
func verifyBank(db *sql.DB) {
	var sum int64
	if *aggregate {
		if err := db.QueryRow("SELECT SUM(balance) FROM accounts").Scan(&sum); err != nil {
			log.Fatal(err)
		}
	} else {
		tx, err := db.Begin()
		if err != nil {
			log.Fatal(err)
		}
		rows, err := tx.Query("SELECT balance FROM accounts")
		if err != nil {
			log.Fatal(err)
		}
		for rows.Next() {
			var balance int64
			if err = rows.Scan(&balance); err != nil {
				log.Fatal(err)
			}
			sum += balance
		}
		// Surface any error that terminated the iteration early.
		if err = rows.Err(); err != nil {
			log.Fatal(err)
		}
		if err = tx.Commit(); err != nil {
			log.Fatal(err)
		}
	}

	if sum == 0 {
		log.Info("The bank is in good order.")
	} else {
		log.Fatalf("The bank is not in good order. Total value: %d", sum)
	}
}
Example #29
// start starts the node by registering the storage instance for the
// RPC service "Node" and initializing stores for each specified
// engine. Launches periodic store gossiping in a goroutine.
func (n *Node) start(rpcServer *rpc.Server, addr net.Addr, engines []engine.Engine,
	attrs roachpb.Attributes, stopper *stop.Stopper) error {
	n.initDescriptor(addr, attrs)
	const method = "Node.Batch"
	if err := rpcServer.Register(method, n.executeCmd, &roachpb.BatchRequest{}); err != nil {
		log.Fatalf("unable to register node service with RPC server: %s", err)
	}

	// Start status monitor.
	n.status.StartMonitorFeed(n.ctx.EventFeed)

	// Initialize stores, including bootstrapping new ones.
	if err := n.initStores(engines, stopper); err != nil {
		return err
	}

	n.startedAt = n.ctx.Clock.Now().WallTime

	// Initialize publisher for Node Events. This requires the NodeID, which is
	// initialized by initStores(); because of this, some Store initialization
	// events will precede the StartNodeEvent on the feed.
	n.feed = status.NewNodeEventFeed(n.Descriptor.NodeID, n.ctx.EventFeed)
	n.feed.StartNode(n.Descriptor, n.startedAt)

	n.startPublishStatuses(stopper)
	n.startGossip(stopper)
	log.Infoc(n.context(), "Started node with %v engine(s) and attributes %v", engines, attrs.Attrs)
	return nil
}
Example #30
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(nodeCount int) *Network {
	clock := hlc.NewClock(hlc.UnixNano)

	log.Infof("simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stop.NewStopper(),
	}
	n.rpcContext = rpc.NewContext(&base.Context{Insecure: true}, clock, n.Stopper)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(err)
		}
		// Build a resolver for each instance or we'll get data races.
		r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr)
		if err != nil {
			log.Fatalf("bad gossip address %s: %s", n.Nodes[0].Addr, err)
		}
		node.Gossip.SetResolvers([]resolver.Resolver{r})
		if err := n.StartNode(node); err != nil {
			log.Fatal(err)
		}
	}
	return n
}