Example #1
func verifyBank(db *sql.DB) {
	var sum int64
	if *aggregate {
		if err := db.QueryRow("SELECT SUM(balance) FROM accounts").Scan(&sum); err != nil {
			log.Fatal(err)
		}
	} else {
		tx, err := db.Begin()
		if err != nil {
			log.Fatal(err)
		}
		rows, err := tx.Query("SELECT balance FROM accounts")
		if err != nil {
			log.Fatal(err)
		}
		for rows.Next() {
			var balance int64
			if err = rows.Scan(&balance); err != nil {
				log.Fatal(err)
			}
			sum += balance
		}
		// Surface any error encountered during iteration before trusting the sum.
		if err = rows.Err(); err != nil {
			log.Fatal(err)
		}
		if err = tx.Commit(); err != nil {
			log.Fatal(err)
		}
	}

	if sum == 0 {
		log.Info("The bank is in good order.")
	} else {
		log.Fatalf("The bank is not in good order. Total value: %d", sum)
	}
}
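The excerpt dereferences a package-level *aggregate flag. A minimal sketch of the wiring it assumes (the flag name matches the code above; the default and help text are assumptions):

var aggregate = flag.Bool("aggregate", true,
	"verify with SELECT SUM(balance) instead of scanning individual rows")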
Example #2
File: node.go Project: gechong/cockroach
// bootstrapStores bootstraps uninitialized stores once the cluster
// and node IDs have been established for this node. Store IDs are
// allocated via a sequence id generator stored at a system key per
// node.
func (n *Node) bootstrapStores(bootstraps *list.List, stopper *stop.Stopper) {
	log.Infof("bootstrapping %d store(s)", bootstraps.Len())
	if n.ClusterID == "" {
		panic("ClusterID missing during store bootstrap of auxiliary store")
	}

	// Bootstrap all waiting stores by allocating a new store id for
	// each and invoking store.Bootstrap() to persist.
	inc := int64(bootstraps.Len())
	firstID, err := allocateStoreIDs(n.Descriptor.NodeID, inc, n.ctx.DB)
	if err != nil {
		log.Fatal(err)
	}
	sIdent := roachpb.StoreIdent{
		ClusterID: n.ClusterID,
		NodeID:    n.Descriptor.NodeID,
		StoreID:   firstID,
	}
	for e := bootstraps.Front(); e != nil; e = e.Next() {
		s := e.Value.(*storage.Store)
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			log.Fatal(err)
		}
		if err := s.Start(stopper); err != nil {
			log.Fatal(err)
		}
		n.stores.AddStore(s)
		sIdent.StoreID++
		log.Infof("bootstrapped store %s", s)
		// Done regularly in Node.startGossip, but this cuts down the time
		// until this store is used for range allocations.
		s.GossipStore()
	}
}
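Only the signature of allocateStoreIDs appears in this excerpt; per the doc comment it reserves a contiguous block of inc IDs from a sequence generator at a system key. A hypothetical sketch of that contract, assuming a KV increment primitive (db.Inc) and a keys.StoreIDGenerator key (both are assumptions, not the verified implementation):

// allocateStoreIDsSketch reserves inc consecutive store IDs and returns the
// first of the block; the bootstrap loop above then assigns firstID,
// firstID+1, and so on.
func allocateStoreIDsSketch(nodeID roachpb.NodeID, inc int64, db *client.DB) (roachpb.StoreID, error) {
	res, err := db.Inc(keys.StoreIDGenerator, inc) // atomic add on a system key
	if err != nil {
		return 0, err
	}
	return roachpb.StoreID(res.ValueInt() - inc + 1), nil
}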
Example #3
// SimulateNetwork runs until the simCallback returns false.
//
// At each cycle, every node gossips a key equal to its address (unique)
// with the cycle as the value. The received cycle value can be used
// to determine the aging of information between any two nodes in the
// network.
//
// At each cycle of the simulation, node 0 gossips the sentinel.
//
// The simulation callback receives the cycle and the network as arguments.
func (n *Network) SimulateNetwork(simCallback func(cycle int, network *Network) bool) {
	nodes := n.Nodes
	for cycle := 1; simCallback(cycle, n); cycle++ {
		// Node 0 gossips sentinel & cluster ID every cycle.
		if err := nodes[0].Gossip.AddInfo(
			gossip.KeySentinel,
			encoding.EncodeUint64Ascending(nil, uint64(cycle)),
			time.Hour); err != nil {
			log.Fatal(err)
		}
		if err := nodes[0].Gossip.AddInfo(gossip.KeyClusterID,
			encoding.EncodeUint64Ascending(nil, uint64(cycle)), 0*time.Second); err != nil {
			log.Fatal(err)
		}
		// Every node gossips cycle.
		for _, node := range nodes {
			if err := node.Gossip.AddInfo(
				node.Addr.String(),
				encoding.EncodeUint64Ascending(nil, uint64(cycle)),
				time.Hour); err != nil {
				log.Fatal(err)
			}
			node.Gossip.SimulationCycle()
		}
		time.Sleep(5 * time.Millisecond)
	}
	log.Infof("gossip network simulation: total infos sent=%d, received=%d", n.infosSent(), n.infosReceived())
}
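Since the callback controls termination, a fixed-length run is just a cycle bound; a minimal usage sketch:

// Run the simulation for 100 cycles, then let SimulateNetwork return.
n.SimulateNetwork(func(cycle int, network *Network) bool {
	return cycle <= 100
})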
Example #4
func main() {
	if err := parseFlags(); err != nil {
		log.Fatal(err)
	}

	serv := maybeStartLocalServer()
	if serv != nil {
		defer serv.Stop()
	}

	db, err := setupDatabase()
	if err != nil {
		log.Fatal(err)
	}

	lastNow := time.Now()
	var lastNumDumps uint64
	writers := make([]blockWriter, *concurrency)

	for i := range writers {
		writers[i] = newBlockWriter(db)
		go writers[i].run()
	}

	for range time.Tick(*outputInterval) {
		now := time.Now()
		elapsed := time.Since(lastNow)
		dumps := atomic.LoadUint64(&numBlocks)
		log.Infof("%d dumps were executed at %.1f/second.", (dumps - lastNumDumps), float64(dumps-lastNumDumps)/elapsed.Seconds())
		lastNumDumps = dumps
		lastNow = now
	}
}
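The loop reads several package-level names the excerpt omits. A sketch of plausible declarations (the names match the code above; defaults and help text are assumptions):

var (
	concurrency    = flag.Int("concurrency", 4, "number of concurrent block writers")
	outputInterval = flag.Duration("output-interval", 1*time.Second, "interval between stats lines")

	// numBlocks is incremented atomically by each blockWriter after a write.
	numBlocks uint64
)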
Example #5
File: node.go Project: guanqun/cockroach
// bootstrapStores bootstraps uninitialized stores once the cluster
// and node IDs have been established for this node. Store IDs are
// allocated via a sequence id generator stored at a system key per
// node.
func (n *Node) bootstrapStores(bootstraps []*storage.Store, stopper *stop.Stopper) {
	if n.ClusterID == *uuid.EmptyUUID {
		panic("ClusterID missing during store bootstrap of auxiliary store")
	}

	// Bootstrap all waiting stores by allocating a new store id for
	// each and invoking store.Bootstrap() to persist.
	inc := int64(len(bootstraps))
	firstID, err := allocateStoreIDs(n.Descriptor.NodeID, inc, n.ctx.DB)
	if err != nil {
		log.Fatal(err)
	}
	sIdent := roachpb.StoreIdent{
		ClusterID: n.ClusterID,
		NodeID:    n.Descriptor.NodeID,
		StoreID:   firstID,
	}
	for _, s := range bootstraps {
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			log.Fatal(err)
		}
		if err := s.Start(stopper); err != nil {
			log.Fatal(err)
		}
		n.stores.AddStore(s)
		sIdent.StoreID++
		log.Infof("bootstrapped store %s", s)
		// Done regularly in Node.startGossip, but this cuts down the time
		// until this store is used for range allocations.
		s.GossipStore()
	}
}
Example #6
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(nodeCount int) *Network {
	clock := hlc.NewClock(hlc.UnixNano)

	log.Infof("simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stop.NewStopper(),
	}
	n.rpcContext = rpc.NewContext(&base.Context{Insecure: true}, clock, n.Stopper)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(err)
		}
		// Build a resolver for each instance or we'll get data races.
		r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr)
		if err != nil {
			log.Fatalf("bad gossip address %s: %s", n.Nodes[0].Addr, err)
		}
		node.Gossip.SetResolvers([]resolver.Resolver{r})
		if err := n.StartNode(node); err != nil {
			log.Fatal(err)
		}
	}
	return n
}
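A minimal usage sketch; the teardown via the returned Stopper is an assumption based on the fields shown above:

n := NewNetwork(3)
defer n.Stopper.Stop()
// drive the network, e.g. via SimulateNetwork ...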
Example #7
func (z *zeroSum) setup() uint32 {
	db := z.db[0]
	if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS zerosum"); err != nil {
		log.Fatal(context.Background(), err)
	}

	accounts := `
CREATE TABLE IF NOT EXISTS accounts (
  id INT PRIMARY KEY,
  balance INT NOT NULL
)
`
	if _, err := db.Exec(accounts); err != nil {
		log.Fatal(context.Background(), err)
	}

	tableIDQuery := `
SELECT tables.id FROM system.namespace tables
  JOIN system.namespace dbs ON dbs.id = tables.parentid
  WHERE dbs.name = $1 AND tables.name = $2
`
	var tableID uint32
	if err := db.QueryRow(tableIDQuery, "zerosum", "accounts").Scan(&tableID); err != nil {
		log.Fatal(context.Background(), err)
	}
	return tableID
}
Example #8
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(nodeCount int, createResolvers bool) *Network {
	log.Infof(context.TODO(), "simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stop.NewStopper(),
	}
	n.rpcContext = rpc.NewContext(&base.Context{Insecure: true}, nil, n.Stopper)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(context.TODO(), err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(context.TODO(), err)
		}
		// Build a resolver for each instance or we'll get data races.
		if createResolvers {
			r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr())
			if err != nil {
				log.Fatalf(context.TODO(), "bad gossip address %s: %s", n.Nodes[0].Addr(), err)
			}
			node.Gossip.SetResolvers([]resolver.Resolver{r})
		}
	}
	return n
}
Example #9
func (c *cluster) freeze(nodeIdx int, freeze bool) {
	addr := c.rpcAddr(nodeIdx)
	conn, err := c.rpcCtx.GRPCDial(addr)
	if err != nil {
		log.Fatalf(context.Background(), "unable to dial: %s: %v", addr, err)
	}

	adminClient := serverpb.NewAdminClient(conn)
	stream, err := adminClient.ClusterFreeze(
		context.Background(), &serverpb.ClusterFreezeRequest{Freeze: freeze})
	if err != nil {
		log.Fatal(context.Background(), err)
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(context.Background(), err)
		}
		fmt.Println(resp.Message)
	}
	fmt.Println("ok")
}
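Usage is symmetric: the same helper both freezes and thaws, driven by the boolean (a sketch):

c.freeze(0, true)  // ask node 0 to freeze the cluster
c.freeze(0, false) // ... and later to unfreeze it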
Example #10
File: docker.go Project: nkhuyu/cockroach
// newDockerClient constructs a new docker client using the best available
// method. If DOCKER_HOST is set, initialize the client using DOCKER_TLS_VERIFY
// and DOCKER_CERT_PATH. If DOCKER_HOST is not set, look for the unix domain
// socket in /run/docker.sock and /var/run/docker.sock.
func newDockerClient() dockerclient.Client {
	if host := os.Getenv("DOCKER_HOST"); host != "" {
		if os.Getenv("DOCKER_TLS_VERIFY") == "" {
			c, err := dockerclient.NewDockerClient(host, nil)
			if err != nil {
				log.Fatal(err)
			}
			return c
		}
		c, err := dockerclient.NewDockerClient(host, getTLSConfig())
		if err != nil {
			log.Fatal(err)
		}
		return c
	}

	for _, l := range []string{"/run/docker.sock", "/var/run/docker.sock"} {
		if _, err := os.Stat(l); err != nil {
			continue
		}
		c, err := dockerclient.NewDockerClient("unix://"+l, nil)
		if err != nil {
			// Fail loudly, matching the DOCKER_HOST paths above, rather than
			// silently returning a nil client.
			log.Fatal(err)
		}
		return c
	}
	log.Fatal("docker not configured")
	return nil
}
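Client selection is driven entirely by the environment; a usage sketch (the host and paths are illustrative only):

// Either point DOCKER_HOST at a TCP daemon (with DOCKER_TLS_VERIFY and
// DOCKER_CERT_PATH for TLS), or unset DOCKER_HOST and rely on a local
// /run/docker.sock or /var/run/docker.sock.
c := newDockerClient()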
Example #11
// startAdminServer launches a new admin server using minimal engine
// and local database setup. It returns the URL of the new http test
// server together with a stopper; the server is registered as a closer,
// so stopping the stopper cleans it up. The Cockroach KV client address
// is set to the address of the test server.
func startAdminServer() (string, *stop.Stopper) {
	stopper := stop.NewStopper()
	db, err := BootstrapCluster("cluster-1", []engine.Engine{engine.NewInMem(proto.Attributes{}, 1<<20)}, stopper)
	if err != nil {
		log.Fatal(err)
	}
	admin := newAdminServer(db, stopper)
	mux := http.NewServeMux()
	mux.Handle(adminEndpoint, admin)
	mux.Handle(debugEndpoint, admin)
	httpServer := httptest.NewUnstartedServer(mux)
	tlsConfig, err := testContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}
	httpServer.TLS = tlsConfig
	httpServer.StartTLS()
	stopper.AddCloser(httpServer)

	if strings.HasPrefix(httpServer.URL, "http://") {
		testContext.Addr = strings.TrimPrefix(httpServer.URL, "http://")
	} else if strings.HasPrefix(httpServer.URL, "https://") {
		testContext.Addr = strings.TrimPrefix(httpServer.URL, "https://")
	}
	return httpServer.URL, stopper
}
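Per the doc comment, the caller owns cleanup; a usage sketch:

url, stopper := startAdminServer()
defer stopper.Stop() // also closes the httptest server registered via AddCloser
// issue admin requests against url ...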
Example #12
File: node.go Project: gechong/cockroach
// initNodeID updates the internal NodeDescriptor with the given ID. If zero is
// supplied, a new NodeID is allocated with the first invocation. For all other
// values, the supplied ID is stored into the descriptor (unless one has been
// set previously, in which case a fatal error occurs).
//
// Upon setting a new NodeID, the descriptor is gossiped and the NodeID is
// stored into the gossip instance.
func (n *Node) initNodeID(id roachpb.NodeID) {
	if id < 0 {
		log.Fatalf("NodeID must not be negative")
	}

	if o := n.Descriptor.NodeID; o > 0 {
		if id == 0 {
			return
		}
		log.Fatalf("cannot initialize NodeID to %d, already have %d", id, o)
	}
	var err error
	if id == 0 {
		id, err = allocateNodeID(n.ctx.DB)
		if err != nil {
			log.Fatal(err)
		}
		log.Infof("new node allocated ID %d", id)
		if id == 0 {
			log.Fatal("new node allocated illegal ID 0")
		}
		n.ctx.Gossip.SetNodeID(id)
	} else {
		log.Infof("node ID %d initialized", id)
	}
	// Gossip the node descriptor to make this node addressable by node ID.
	n.Descriptor.NodeID = id
	if err = n.ctx.Gossip.SetNodeDescriptor(&n.Descriptor); err != nil {
		log.Fatalf("couldn't gossip descriptor for node %d: %s", n.Descriptor.NodeID, err)
	}
}
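The zero/non-zero contract from the doc comment, expressed as call sites (a sketch):

n.initNodeID(0) // first call with zero: allocate a fresh NodeID and gossip it
n.initNodeID(0) // later zero calls are no-ops once an ID is set
n.initNodeID(7) // fatals if a different ID was set previously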
Example #13
// NewNetwork creates nodeCount gossip nodes. The networkType should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes (e.g. DefaultTestGossipInterval).
// TODO: This method should take `stopper` as an argument.
func NewNetwork(nodeCount int, networkType string,
	gossipInterval time.Duration) *Network {
	clock := hlc.NewClock(hlc.UnixNano)

	log.Infof("simulating gossip network with %d nodes", nodeCount)

	stopper := stop.NewStopper()

	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper)

	nodes := make([]*Node, nodeCount)
	for i := range nodes {
		server := rpc.NewServer(util.CreateTestAddr(networkType), rpcContext)
		if err := server.Start(); err != nil {
			log.Fatal(err)
		}
		nodes[i] = &Node{Server: server}
	}

	var numResolvers int
	if len(nodes) > 3 {
		numResolvers = 3
	} else {
		numResolvers = len(nodes)
	}

	for i, leftNode := range nodes {
		// Build new resolvers for each instance or we'll get data races.
		var resolvers []resolver.Resolver
		for _, rightNode := range nodes[:numResolvers] {
			resolvers = append(resolvers, resolver.NewResolverFromAddress(rightNode.Server.Addr()))
		}

		gossipNode := gossip.New(rpcContext, gossipInterval, resolvers)
		addr := leftNode.Server.Addr()
		if err := gossipNode.SetNodeDescriptor(&roachpb.NodeDescriptor{
			NodeID:  roachpb.NodeID(i + 1),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}); err != nil {
			log.Fatal(err)
		}

		gossipNode.Start(leftNode.Server, stopper)
		stopper.AddCloser(leftNode.Server)

		leftNode.Gossip = gossipNode
	}

	return &Network{
		Nodes:          nodes,
		NetworkType:    networkType,
		GossipInterval: gossipInterval,
		Stopper:        stopper,
	}
}
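A usage sketch; note the TODO above, so for now the internal stopper is reached through the returned Network:

network := NewNetwork(8, "unix", 10*time.Millisecond)
defer network.Stopper.Stop()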
Example #14
// NewNetwork creates nodeCount gossip nodes. The networkType should
// be set to either "tcp" or "unix".
func NewNetwork(nodeCount int, networkType string) *Network {
	clock := hlc.NewClock(hlc.UnixNano)

	log.Infof("simulating gossip network with %d nodes", nodeCount)

	stopper := stop.NewStopper()

	rpcContext := rpc.NewContext(&base.Context{Insecure: true}, clock, stopper)
	tlsConfig, err := rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}

	nodes := make([]*Node, nodeCount)
	for i := range nodes {
		server := rpc.NewServer(rpcContext)

		testAddr := util.CreateTestAddr(networkType)
		ln, err := util.ListenAndServe(stopper, server, testAddr, tlsConfig)
		if err != nil {
			log.Fatal(err)
		}

		nodes[i] = &Node{Server: server, Addr: ln.Addr()}
	}

	for i, leftNode := range nodes {
		// Build new resolvers for each instance or we'll get data races.
		resolvers := []resolver.Resolver{resolver.NewResolverFromAddress(nodes[0].Addr)}

		gossipNode := gossip.New(rpcContext, resolvers)
		addr := leftNode.Addr
		gossipNode.SetNodeID(roachpb.NodeID(i + 1))
		if err := gossipNode.SetNodeDescriptor(&roachpb.NodeDescriptor{
			NodeID:  roachpb.NodeID(i + 1),
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}); err != nil {
			log.Fatal(err)
		}
		if err := gossipNode.AddInfo(addr.String(), encoding.EncodeUint64(nil, 0), time.Hour); err != nil {
			log.Fatal(err)
		}
		gossipNode.Start(leftNode.Server, addr, stopper)
		gossipNode.EnableSimulationCycler(true)

		leftNode.Gossip = gossipNode
	}

	return &Network{
		Nodes:       nodes,
		NetworkType: networkType,
		Stopper:     stopper,
	}
}
Example #15
func main() {
	if err := parseFlags(); err != nil {
		log.Fatal(err)
	}

	serv := maybeStartLocalServer()
	if serv != nil {
		defer serv.Stop()
	}

	db, err := setupDatabase()
	if err != nil {
		log.Fatal(err)
	}

	lastNow := time.Now()
	var lastNumDumps uint64
	writers := make([]blockWriter, *concurrency)

	errCh := make(chan error)
	for i := range writers {
		writers[i] = newBlockWriter(db)
		go writers[i].run(errCh)
	}

	var numErr int
	for range time.Tick(*outputInterval) {
		now := time.Now()
		elapsed := time.Since(lastNow)
		dumps := atomic.LoadUint64(&numBlocks)
		log.Infof("%d dumps were executed at %.1f/second (%d total errors)", (dumps - lastNumDumps), float64(dumps-lastNumDumps)/elapsed.Seconds(), numErr)
		// Drain any queued writer errors without blocking; honor the
		// tolerateErrors flag by warning instead of exiting.
		for {
			select {
			case err := <-errCh:
				numErr++
				if !*tolerateErrors {
					log.Fatal(err)
				} else {
					log.Warning(err)
				}
				continue
			default:
			}
			break
		}
		lastNumDumps = dumps
		lastNow = now
	}
}
Example #16
func newCLITest() cliTest {
	// Reset the client context for each test. We don't reset the
	// pointer (because they are tied into the flags), but instead
	// overwrite the existing struct's values.
	baseCtx.InitDefaults()
	cliCtx.InitCLIDefaults()

	osStderr = os.Stdout

	s, err := serverutils.StartServerRaw(base.TestServerArgs{})
	if err != nil {
		log.Fatalf(context.Background(), "Could not start server: %v", err)
	}

	tempDir, err := ioutil.TempDir("", "cli-test")
	if err != nil {
		log.Fatal(context.Background(), err)
	}

	// Copy these assets to disk from embedded strings, so this test can
	// run from a standalone binary.
	// Disable embedded certs, or the security library will try to load
	// our real files as embedded assets.
	security.ResetReadFileFn()

	assets := []string{
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCACert),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedCAKey),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedNodeCert),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedNodeKey),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedRootCert),
		filepath.Join(security.EmbeddedCertsDir, security.EmbeddedRootKey),
	}

	for _, a := range assets {
		securitytest.RestrictedCopy(nil, a, tempDir, filepath.Base(a))
	}

	return cliTest{
		TestServer: s.(*server.TestServer),
		certsDir:   tempDir,
		cleanupFunc: func() {
			if err := os.RemoveAll(tempDir); err != nil {
				log.Fatal(context.Background(), err)
			}
		},
	}
}
Example #17
func (l *Cluster) startNode(i int) *Container {
	gossipNodes := []string{}
	// Use a distinct loop variable; the parameter i (this node's index)
	// would otherwise be shadowed.
	for j := 0; j < l.numNodes; j++ {
		gossipNodes = append(gossipNodes, fmt.Sprintf("%s:%d", node(j), cockroachPort))
	}

	cmd := []string{
		"start",
		"--stores=ssd=" + data(i),
		"--certs=/certs",
		"--addr=" + fmt.Sprintf("%s:%d", node(i), cockroachPort),
		"--gossip=" + strings.Join(gossipNodes, ","),
		"--scan-max-idle-time=200ms", // set low to speed up tests
	}
	if len(l.LogDir) > 0 {
		dockerDir := "/logs/" + node(i)
		localDir := l.LogDir + "/" + node(i)
		if !exists(localDir) {
			if err := os.Mkdir(localDir, 0777); err != nil {
				log.Fatal(err)
			}
		}
		log.Infof("Logs for node %s are located at %s.", node(i), localDir)
		cmd = append(
			cmd,
			"--log-dir="+dockerDir,
			"--logtostderr=false",
			"--alsologtostderr=true")
	}
	c := l.createRoach(i, cmd...)
	maybePanic(c.Start(nil, l.dns, l.vols))
	c.Name = node(i)
	log.Infof("started %s: https://%s", c.Name, c.Addr(""))
	return c
}
Example #18
func (c *cluster) makeNode(nodeIdx int, extraArgs []string) *node {
	name := fmt.Sprintf("%d", nodeIdx+1)
	dir := filepath.Join(dataDir, name)
	logDir := filepath.Join(dir, "logs")
	if err := os.MkdirAll(logDir, 0755); err != nil {
		log.Fatal(context.Background(), err)
	}

	args := []string{
		cockroachBin,
		"start",
		"--insecure",
		fmt.Sprintf("--port=%d", c.rpcPort(nodeIdx)),
		fmt.Sprintf("--http-port=%d", c.httpPort(nodeIdx)),
		fmt.Sprintf("--store=%s", dir),
		fmt.Sprintf("--cache=256MiB"),
		fmt.Sprintf("--logtostderr"),
	}
	if nodeIdx > 0 {
		args = append(args, fmt.Sprintf("--join=localhost:%d", c.rpcPort(0)))
	}
	args = append(args, extraArgs...)

	node := &node{
		LogDir: logDir,
		Args:   args,
	}
	node.start()
	return node
}
Example #19
func createTestClient(addr string) *client.DB {
	db, err := client.Open("https://root@" + addr + "?certs=" + security.EmbeddedCertsDir)
	if err != nil {
		log.Fatal(err)
	}
	return db
}
Example #20
// Serve accepts and services connections on the already started
// listener.
func (s *Server) Serve(handler http.Handler) {
	s.handler = handler
	s.activeConns = make(map[net.Conn]struct{})

	server := &http.Server{
		Handler: s,
		ConnState: func(conn net.Conn, state http.ConnState) {
			s.mu.Lock()
			defer s.mu.Unlock()

			switch state {
			case http.StateNew:
				s.activeConns[conn] = struct{}{}
			case http.StateHijacked, http.StateClosed:
				delete(s.activeConns, conn)
			}
		},
	}

	s.context.Stopper.RunWorker(func() {
		if err := server.Serve(s.listener); err != nil {
			if !strings.HasSuffix(err.Error(), "use of closed network connection") {
				log.Fatal(err)
			}
		}
	})

	s.context.Stopper.RunWorker(func() {
		<-s.context.Stopper.ShouldStop()
		s.Close()
	})
}
Example #21
// Serve accepts and services connections on the already started
// listener.
func (s *Server) Serve(handler http.Handler) {
	s.handler = handler
	s.activeConns = make(map[net.Conn]struct{})

	server := &http.Server{
		Handler: s,
		ConnState: func(conn net.Conn, state http.ConnState) {
			s.mu.Lock()
			defer s.mu.Unlock()

			switch state {
			case http.StateNew:
				if s.closed {
					conn.Close()
					return
				}
				s.activeConns[conn] = struct{}{}
			case http.StateClosed:
				delete(s.activeConns, conn)
			}
		},
	}

	s.context.Stopper.RunWorker(func() {
		if err := server.Serve(s.listener); err != nil && !isClosedConnection(err) {
			log.Fatal(err)
		}
	})

	s.context.Stopper.RunWorker(func() {
		<-s.context.Stopper.ShouldStop()
		s.Close()
	})
}
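isClosedConnection is not shown here; given the string check in the earlier variant (Example #20), a plausible sketch of the helper (an assumption, not the verified implementation):

// isClosedConnection reports whether err is the benign error returned
// when the listener is shut down underneath http.Server.Serve.
func isClosedConnection(err error) bool {
	return strings.Contains(err.Error(), "use of closed network connection")
}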
Example #22
// SimulateNetwork creates nodeCount gossip nodes. The network should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes. After each gossipInterval period,
// simCallback is invoked; when it returns false, the simulation
// ends. If it returns true, the simulation continues another cycle.
//
// Node0 gossips the node count as well as the gossip sentinel. The
// gossip bootstrap hosts are set to the first three nodes (or fewer if
// less than three are available).
//
// At each cycle of the simulation, node 0 gossips the sentinel. If
// the simulation requires other nodes to gossip, this should be done
// via simCallback.
//
// The simulation callback receives a map of nodes, keyed by node address.
func SimulateNetwork(nodeCount int, network string, gossipInterval time.Duration,
	simCallback func(cycle int, nodes map[string]*Gossip) bool) {

	// Seed the random number generator for non-determinism across
	// multiple runs.
	rand.Seed(time.Now().UTC().UnixNano())

	tlsConfig := rpc.LoadInsecureTLSConfig()

	log.Infof("simulating network with %d nodes", nodeCount)
	servers := make([]*rpc.Server, nodeCount)
	addrs := make([]net.Addr, nodeCount)
	for i := 0; i < nodeCount; i++ {
		addr := util.CreateTestAddr(network)
		servers[i] = rpc.NewServer(addr, tlsConfig)
		if err := servers[i].Start(); err != nil {
			log.Fatal(err)
		}
		addrs[i] = servers[i].Addr()
	}
	var bootstrap []net.Addr
	if nodeCount < 3 {
		bootstrap = addrs
	} else {
		bootstrap = addrs[:3]
	}

	nodes := make(map[string]*Gossip, nodeCount)
	for i := 0; i < nodeCount; i++ {
		node := New(tlsConfig)
		node.Name = fmt.Sprintf("Node%d", i)
		node.SetBootstrap(bootstrap)
		node.SetInterval(gossipInterval)
		node.Start(servers[i])
		// Node 0 gossips node count.
		if i == 0 {
			node.AddInfo(KeyNodeCount, int64(nodeCount), time.Hour)
		}
		nodes[addrs[i].String()] = node
	}

	gossipTimeout := time.Tick(gossipInterval)
	var complete bool
	for cycle := 0; !complete; cycle++ {
		<-gossipTimeout
		// Node 0 gossips sentinel every cycle.
		nodes[addrs[0].String()].AddInfo(KeySentinel, int64(cycle), time.Hour)
		if !simCallback(cycle, nodes) {
			complete = true
		}
	}

	// Stop all servers & nodes.
	for i := 0; i < nodeCount; i++ {
		servers[i].Close()
		nodes[addrs[i].String()].Stop()
	}
}
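A usage sketch matching the signature above:

SimulateNetwork(8, "unix", 10*time.Millisecond,
	func(cycle int, nodes map[string]*Gossip) bool {
		return cycle < 50 // stop after 50 gossip intervals
	})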
Example #23
func (lt *localInterceptableTransport) handleMessage(msg *RaftMessageRequest) {
	ack := make(chan struct{})
	iMsg := &interceptMessage{
		args: msg,
		ack:  ack,
	}
	// The following channel ops are not protected by a select with
	// ShouldStop since we are running under a StartTask and leaving
	// things partially complete here could prevent other components
	// from shutting down cleanly.
	lt.Events <- iMsg
	<-ack
	lt.mu.Lock()
	srv, ok := lt.listeners[msg.ToReplica.StoreID]
	lt.mu.Unlock()
	if !ok {
		return
	}
	_, err := srv.RaftMessage(msg)
	if err == ErrStopped {
		return
	} else if err != nil {
		log.Fatal(err)
	}
}
Example #24
File: main.go Project: rissoa/cockroach
// request returns the result of performing a http get request.
func request(url string, httpClient *http.Client) ([]byte, bool) {
	for r := retry.Start(retryOptions); r.Next(); {
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			log.Fatal(err)
			return nil, false
		}
		req.Header.Set(util.AcceptHeader, util.JSONContentType)
		resp, err := httpClient.Do(req)
		if err != nil {
			log.Infof("could not GET %s - %s", url, err)
			continue
		}
		body, err := ioutil.ReadAll(resp.Body)
		// Close the body immediately rather than deferring: defers inside a
		// retry loop would pile up until the function returns.
		resp.Body.Close()
		if err != nil {
			log.Infof("could not read body for %s - %s", url, err)
			continue
		}
		if resp.StatusCode != http.StatusOK {
			log.Infof("could not GET %s - statuscode: %d - body: %s", url, resp.StatusCode, body)
			continue
		}
		returnedContentType := resp.Header.Get(util.ContentTypeHeader)
		if returnedContentType != util.JSONContentType {
			log.Infof("unexpected content type: %v", returnedContentType)
			continue
		}
		log.Infof("OK response from %s", url)
		return body, true
	}
	log.Warningf("There was an error retrieving %s", url)
	return nil, false
}
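A usage sketch (the URL is illustrative only):

if body, ok := request("https://localhost:8080/_status/nodes", http.DefaultClient); ok {
	// decode the JSON payload in body ...
	_ = body
}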
Example #25
func (l *LocalCluster) runDockerSpy() {
	l.panicOnStop()

	create := func() (*Container, error) {
		return createContainer(l,
			container.Config{
				Image: dockerspyImage + ":" + dockerspyTag,
				Cmd:   strslice.New("--dns-domain=" + domain),
			}, container.HostConfig{
				Binds:           []string{"/var/run/docker.sock:/var/run/docker.sock"},
				PublishAllPorts: true,
			},
			"docker-spy",
		)
	}
	c, err := create()
	if dockerclient.IsErrImageNotFound(err) {
		if err := pullImage(l, types.ImagePullOptions{ImageID: dockerspyImage, Tag: dockerspyTag}); err != nil {
			log.Fatal(err)
		}

		c, err = create()
	}
	maybePanic(err)
	maybePanic(c.Start())
	l.dns = c
	if ci, err := c.Inspect(); err != nil {
		log.Error(err)
	} else {
		log.Infof("started %s: %s", c.Name(), ci.NetworkSettings.IPAddress)
	}
}
Example #26
func (r *Replica) leasePostCommitTrigger(
	ctx context.Context,
	trigger PostCommitTrigger,
	replicaID roachpb.ReplicaID,
	prevLease *roachpb.Lease, // TODO(tschottdorf): could this not be nil?
) {
	if prevLease.Replica.StoreID != trigger.lease.Replica.StoreID {
		// The lease is changing hands. Is this replica the new lease holder?
		if trigger.lease.Replica.ReplicaID == replicaID {
			// If this replica is a new holder of the lease, update the low water
			// mark of the timestamp cache. Note that clock offset scenarios are
			// handled via a stasis period inherent in the lease which is documented
			// on the Lease struct.
			//
			// The introduction of lease transfers implies that the previous lease
			// may have been shortened and we are now applying a formally overlapping
			// lease (since the old lease holder has promised not to serve any more
			// requests, this is kosher). This means that we don't use the old
			// lease's expiration but instead use the new lease's start to initialize
			// the timestamp cache low water.
			log.Infof(ctx, "new range lease %s following %s [physicalTime=%s]",
				trigger.lease, prevLease, r.store.Clock().PhysicalTime())
			r.mu.Lock()
			r.mu.tsCache.SetLowWater(trigger.lease.Start)
			r.mu.Unlock()

			// Gossip the first range whenever its lease is acquired. We check to
			// make sure the lease is active so that a trailing replica won't process
			// an old lease request and attempt to gossip the first range.
			if r.IsFirstRange() && trigger.lease.Covers(r.store.Clock().Now()) {
				func() {
					r.mu.Lock()
					defer r.mu.Unlock()
					r.gossipFirstRangeLocked(ctx)
				}()
			}
		} else if trigger.lease.Covers(r.store.Clock().Now()) {
			if err := r.withRaftGroup(func(raftGroup *raft.RawNode) error {
				if raftGroup.Status().RaftState == raft.StateLeader {
					// If this replica is the raft leader but it is not the new lease
					// holder, then try to transfer the raft leadership to match the
					// lease.
					log.Infof(ctx, "range %v: replicaID %v transfer raft leadership to replicaID %v",
						r.RangeID, replicaID, trigger.lease.Replica.ReplicaID)
					raftGroup.TransferLeader(uint64(trigger.lease.Replica.ReplicaID))
				}
				return nil
			}); err != nil {
				// An error here indicates that this Replica has been destroyed
				// while lacking the necessary synchronization (or even worse, it
				// fails spuriously - could be a storage error), and so we avoid
				// sweeping that under the rug.
				//
				// TODO(tschottdorf): this error is not handled any more
				// at this level.
				log.Fatal(ctx, NewReplicaCorruptionError(err))
			}
		}
	}
}
Example #27
func ExampleDB_Put_insecure() {
	s := &server.TestServer{}
	s.Ctx = server.NewTestContext()
	s.Ctx.Insecure = true
	if pErr := s.Start(); pErr != nil {
		log.Fatalf("Could not start server: %v", pErr)
	}
	defer s.Stop()

	db, err := client.Open(s.Stopper(), "rpc://foo@"+s.ServingAddr())
	if err != nil {
		log.Fatal(err)
	}

	if pErr := db.Put("aa", "1"); pErr != nil {
		panic(pErr)
	}
	result, pErr := db.Get("aa")
	if pErr != nil {
		panic(pErr)
	}
	fmt.Printf("aa=%s\n", result.ValueBytes())

	// Output:
	// aa=1
}
Example #28
File: bank.go Project: huaxling/cockroach
// Read the balances in all the accounts and return them.
func (bank *Bank) sumAllAccounts() int64 {
	var result int64
	err := bank.db.Txn(func(txn *client.Txn) error {
		rows, err := txn.Scan(bank.makeAccountID(0), bank.makeAccountID(bank.numAccounts), int64(bank.numAccounts))
		if err != nil {
			return err
		}
		if len(rows) != bank.numAccounts {
			return fmt.Errorf("Could only read %d of %d rows of the database.\n", len(rows), bank.numAccounts)
		}
		// Sum up the balances.
		for i := 0; i < bank.numAccounts; i++ {
			account := &Account{}
			err := account.decode(rows[i].ValueBytes())
			if err != nil {
				return err
			}
			// fmt.Printf("Account %d contains %d$\n", bank.firstAccount+i, account.Balance)
			result += account.Balance
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	return result
}
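Typical use is an invariant check: transfers move money between accounts, so the total must stay constant. A sketch, where initialBalance is a hypothetical per-account starting amount (an assumption, not shown in the source):

if sum := bank.sumAllAccounts(); sum != int64(bank.numAccounts)*initialBalance {
	log.Fatalf("inconsistent total balance: %d", sum)
}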
Example #29
func createTestClientFor(stopper *stop.Stopper, addr, user string) *client.DB {
	db, err := client.Open(stopper, "rpcs://"+user+"@"+addr+"?certs="+security.EmbeddedCertsDir)
	if err != nil {
		log.Fatal(err)
	}
	return db
}
Example #30
func ExampleDB_Insecure() {
	s := &server.TestServer{}
	s.Ctx = server.NewTestContext()
	s.Ctx.Insecure = true
	if err := s.Start(); err != nil {
		log.Fatalf("Could not start server: %v", err)
	}
	log.Infof("Test server listening on %s: %s", s.Ctx.RequestScheme(), s.ServingAddr())
	defer s.Stop()

	db, err := client.Open("http://root@" + s.ServingAddr())
	if err != nil {
		log.Fatal(err)
	}

	if err := db.Put("aa", "1"); err != nil {
		panic(err)
	}
	result, err := db.Get("aa")
	if err != nil {
		panic(err)
	}
	fmt.Printf("aa=%s\n", result.ValueBytes())

	// Output:
	// aa=1
}