Example #1
// New creates an instance of a gossip node.
func New(rpcContext *rpc.Context, resolvers []resolver.Resolver) *Gossip {
	g := &Gossip{
		Connected:     make(chan struct{}),
		server:        newServer(),
		outgoing:      makeNodeSet(1),
		bootstrapping: map[string]struct{}{},
		clients:       []*client{},
		disconnected:  make(chan *client, 10),
		stalled:       make(chan struct{}, 1),
		resolverIdx:   len(resolvers) - 1,
		resolvers:     resolvers,
	}
	// The gossip RPC context doesn't measure clock offsets, isn't
	// shared with the other RPC clients which the node may be using,
	// and disables reconnects to make it possible to know for certain
	// which other nodes in the cluster are incoming via gossip.
	if rpcContext != nil {
		g.rpcContext = rpcContext.Copy()
		g.rpcContext.DisableCache = true
		g.rpcContext.DisableReconnects = true
		g.rpcContext.RemoteClocks = nil
	}

	// Add ourselves as a SystemConfig watcher.
	g.is.registerCallback(KeySystemConfig, g.updateSystemConfig)
	return g
}
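A minimal caller-side sketch of New (the variable names, the timeout, and the surrounding wiring are assumptions for illustration, not code from the project):

// Hypothetical usage; rpcCtx and resolvers are assumed to be an
// already-constructed *rpc.Context and []resolver.Resolver.
g := New(rpcCtx, resolvers)
// Connected is initialized above as a bare channel; the codebase closes it
// once the node first joins the gossip network, so callers can block on it.
select {
case <-g.Connected:
	// Connected to the gossip network; bootstrap-dependent work can proceed.
case <-time.After(30 * time.Second):
	log.Fatalf("timed out waiting for gossip to connect")
}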
Example #2
File: client.go Project: the872/cockroach
// start dials the remote addr and commences gossip once connected. Upon exit,
// the client is sent on the disconnected channel. This method starts client
// processing in a goroutine and returns immediately.
func (c *client) start(g *Gossip, disconnected chan *client, ctx *rpc.Context, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		defer func() {
			disconnected <- c
		}()

		// Note: avoid using `grpc.WithBlock` here. This code is already
		// asynchronous from the caller's perspective, so the only effect of
		// `WithBlock` here is blocking shutdown - at the time of this writing,
		// that ends up making `kv` tests take twice as long.
		conn, err := ctx.GRPCDial(c.addr.String())
		if err != nil {
			log.Errorf("failed to dial: %v", err)
			return
		}

		// Start gossiping.
		if err := c.gossip(g, NewGossipClient(conn), stopper); err != nil {
			if !grpcutil.IsClosedConnection(err) {
				g.mu.Lock()
				peerID := c.peerID
				g.mu.Unlock()
				if peerID != 0 {
					log.Infof("closing client to node %d (%s): %s", peerID, c.addr, err)
				} else {
					log.Infof("closing client to %s: %s", c.addr, err)
				}
			}
		}
	})
}
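The disconnected channel is how the owning Gossip instance learns that this worker has exited; a generic sketch of that hand-off pattern (an illustration only, with maybeReconnect standing in for the real bookkeeping):

// Manager-side drain loop for the disconnected channel used above. This is an
// illustration of the pattern, not the project's actual management loop.
for {
	select {
	case c := <-disconnected:
		// The worker for c exited (dial failure or closed gossip stream);
		// decide whether to redial, try another resolver, or drop the peer.
		maybeReconnect(c) // hypothetical helper
	case <-stopper.ShouldStop():
		return
	}
}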
Example #3
// New creates an instance of a gossip node.
func New(rpcContext *rpc.Context, gossipInterval time.Duration, resolvers []resolver.Resolver) *Gossip {
	g := &Gossip{
		Connected:     make(chan struct{}),
		RPCContext:    rpcContext,
		server:        newServer(gossipInterval),
		outgoing:      makeNodeSet(MaxPeers),
		bootstrapping: map[string]struct{}{},
		clients:       []*client{},
		disconnected:  make(chan *client, MaxPeers),
		stalled:       make(chan struct{}, 1),
		resolvers:     resolvers,
	}
	// Create the bootstrapping RPC context. This context doesn't
	// measure clock offsets and doesn't cache clients because bootstrap
	// connections may go through a load balancer.
	if rpcContext != nil {
		g.bsRPCContext = rpcContext.Copy()
		g.bsRPCContext.DisableCache = true
		g.bsRPCContext.RemoteClocks = nil
	}

	// Add ourselves as a SystemConfig watcher.
	g.is.registerCallback(KeySystemConfig, g.updateSystemConfig)
	return g
}
Example #4
// NewSender returns an implementation of Sender which exposes the Key-Value
// database provided by a Cockroach cluster by connecting via RPC to a
// Cockroach node.
func NewSender(ctx *rpc.Context, target string) (Sender, error) {
	conn, err := ctx.GRPCDial(target)
	if err != nil {
		return nil, err
	}
	return sender{roachpb.NewExternalClient(conn)}, nil
}
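A hedged call-site sketch for NewSender (the address is a placeholder and the surrounding setup is assumed):

// Hypothetical call site; rpcCtx is an already-constructed *rpc.Context and
// the address is a placeholder for a reachable Cockroach node.
sender, err := NewSender(rpcCtx, "localhost:26257")
if err != nil {
	log.Fatalf("failed to connect to node: %v", err)
}
// The returned Sender can back a higher-level KV client; the exact wrapper
// type depends on the package version, so it is not shown here.
_ = sender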
Example #5
// start dials the remote addr and commences gossip once connected. Upon exit,
// the client is sent on the disconnected channel. This method starts client
// processing in a goroutine and returns immediately.
func (c *client) start(g *Gossip, disconnected chan *client, ctx *rpc.Context, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		defer func() {
			disconnected <- c
		}()

		conn, err := ctx.GRPCDial(c.addr.String(), grpc.WithBlock())
		if err != nil {
			log.Errorf("failed to dial: %v", err)
			return
		}

		// Start gossiping.
		if err := c.gossip(g, NewGossipClient(conn), stopper); err != nil {
			if !grpcutil.IsClosedConnection(err) {
				g.mu.Lock()
				peerID := c.peerID
				g.mu.Unlock()
				if peerID != 0 {
					log.Infof("closing client to node %d (%s): %s", peerID, c.addr, err)
				} else {
					log.Infof("closing client to %s: %s", c.addr, err)
				}
			}
		}
	})
}
Example #6
// New creates an instance of a gossip node.
func New(rpcContext *rpc.Context, resolvers []resolver.Resolver, stopper *stop.Stopper) *Gossip {
	g := &Gossip{
		Connected:         make(chan struct{}),
		server:            newServer(stopper),
		outgoing:          makeNodeSet(minPeers),
		bootstrapping:     map[string]struct{}{},
		clients:           []*client{},
		disconnected:      make(chan *client, 10),
		stalled:           make(chan struct{}, 1),
		stallInterval:     defaultStallInterval,
		bootstrapInterval: defaultBootstrapInterval,
		cullInterval:      defaultCullInterval,
		nodeDescs:         map[roachpb.NodeID]*roachpb.NodeDescriptor{},
	}
	g.SetResolvers(resolvers)
	// The gossip RPC context doesn't measure clock offsets, isn't
	// shared with the other RPC clients which the node may be using,
	// and disables reconnects to make it possible to know for certain
	// which other nodes in the cluster are incoming via gossip.
	if rpcContext != nil {
		g.rpcContext = rpcContext.Copy()
		g.rpcContext.DisableCache = true
		g.rpcContext.DisableReconnects = true
		g.rpcContext.RemoteClocks = nil
	}

	// Add ourselves as a SystemConfig watcher.
	g.is.registerCallback(KeySystemConfig, g.updateSystemConfig)
	// Add ourselves as a node descriptor watcher.
	g.is.registerCallback(MakePrefixPattern(KeyNodeIDPrefix), g.updateNodeAddress)

	return g
}
Example #7
// grpcTransportFactory is the default TransportFactory, using GRPC.
func grpcTransportFactory(
	opts SendOptions,
	rpcContext *rpc.Context,
	replicas ReplicaSlice,
	args roachpb.BatchRequest,
) (Transport, error) {
	clients := make([]batchClient, 0, len(replicas))
	for _, replica := range replicas {
		conn, err := rpcContext.GRPCDial(replica.NodeDesc.Address.String())
		if err != nil {
			return nil, err
		}
		argsCopy := args
		argsCopy.Replica = replica.ReplicaDescriptor
		remoteAddr := replica.NodeDesc.Address.String()
		clients = append(clients, batchClient{
			remoteAddr: remoteAddr,
			conn:       conn,
			client:     roachpb.NewInternalClient(conn),
			args:       argsCopy,
			healthy:    rpcContext.IsConnHealthy(remoteAddr),
		})
	}

	// Put known-unhealthy clients last.
	splitHealthy(clients)

	return &grpcTransport{
		opts:           opts,
		rpcContext:     rpcContext,
		orderedClients: clients,
	}, nil
}
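splitHealthy implements the "Put known-unhealthy clients last" comment; a stable-partition sketch of that idea (an illustration only, since the real signature differs between the versions shown in these examples):

// Illustrative stable partition over the healthy flag that grpcTransportFactory
// sets via rpcContext.IsConnHealthy; not the project's implementation.
func splitHealthySketch(clients []batchClient) int {
	healthy := clients[:0:0] // zero-capacity slice so appends don't clobber clients
	var unhealthy []batchClient
	for _, c := range clients {
		if c.healthy {
			healthy = append(healthy, c)
		} else {
			unhealthy = append(unhealthy, c)
		}
	}
	copy(clients, healthy)
	copy(clients[len(healthy):], unhealthy)
	return len(healthy) // healthy clients now occupy the front of the slice
}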
Example #8
func newTestServer(t *testing.T, ctx *rpc.Context) (*rpc.Server, net.Listener) {
	s := rpc.NewServer(ctx)

	tlsConfig, err := ctx.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}

	addr := util.CreateTestAddr("tcp")
	ln, err := util.ListenAndServe(ctx.Stopper, s, addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}

	return s, ln
}
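A test typically dials the returned listener back through the same context; a hedged fragment (construction of the *rpc.Context is omitted because its signature varies across the versions shown here):

// Hypothetical test fragment using the helper above.
s, ln := newTestServer(t, ctx)
_ = s // service registration elided
if _, err := ctx.GRPCDial(ln.Addr().String()); err != nil {
	t.Fatalf("failed to dial test server at %s: %v", ln.Addr(), err)
}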
Example #9
func (lt *localRPCTransport) Listen(id roachpb.StoreID, handler RaftMessageHandler) error {
	ctx := crpc.Context{
		Context: base.Context{
			Insecure: true,
		},
		Stopper:      lt.stopper,
		DisableCache: true,
	}
	rpcServer := crpc.NewServer(&ctx)
	err := rpcServer.RegisterAsync(raftMessageName, false, /*not public*/
		func(argsI proto.Message, callback func(proto.Message, error)) {
			defer func() {
				// TODO(bdarnell): the http/rpc code is swallowing panics somewhere.
				if p := recover(); p != nil {
					log.Fatalf("caught panic: %s", p)
				}
			}()
			args := argsI.(*RaftMessageRequest)
			err := handler(args)
			callback(&RaftMessageResponse{}, err)
		}, &RaftMessageRequest{})
	if err != nil {
		return err
	}

	tlsConfig, err := ctx.GetServerTLSConfig()
	if err != nil {
		return err
	}

	addr := util.CreateTestAddr("tcp")
	ln, err := util.ListenAndServe(ctx.Stopper, rpcServer, addr, tlsConfig)
	if err != nil {
		return err
	}

	lt.mu.Lock()
	if _, ok := lt.servers[id]; ok {
		log.Fatalf("node %d already listening", id)
	}
	lt.servers[id] = serverWithAddr{rpcServer, ln.Addr()}
	lt.mu.Unlock()

	return nil
}
Example #10
File: client.go Project: fndaily/cockroach
// start dials the remote addr and commences gossip once connected.
// Upon exit, the client is sent on the disconnected channel.
// If the client experienced an error, its err field will
// be set. This method starts client processing in a goroutine and
// returns immediately.
func (c *client) start(g *Gossip, disconnected chan *client, ctx *rpc.Context, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		defer func() {
			disconnected <- c
		}()

		var dialOpt grpc.DialOption
		if ctx.Insecure {
			dialOpt = grpc.WithInsecure()
		} else {
			tlsConfig, err := ctx.GetClientTLSConfig()
			if err != nil {
				log.Error(err)
				return
			}
			dialOpt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
		}

		conn, err := grpc.Dial(c.addr.String(), dialOpt)
		if err != nil {
			log.Errorf("failed to dial: %v", err)
			return
		}
		defer func() {
			if err := conn.Close(); err != nil {
				log.Error(err)
			}
		}()
		c.rpcClient = NewGossipClient(conn)

		// Start gossiping.
		if err := c.gossip(g, stopper); err != nil {
			if !grpcutil.IsClosedConnection(err) {
				if c.peerID != 0 {
					log.Infof("closing client to node %d (%s): %s", c.peerID, c.addr, err)
				} else {
					log.Infof("closing client to %s: %s", c.addr, err)
				}
			}
		}
	})
}
Example #11
func newTestServer(t *testing.T, ctx *rpc.Context) (*rpc.Server, net.Listener) {
	s := rpc.NewServer(ctx)

	tlsConfig, err := ctx.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}

	// We may be called in a loop, meaning tlsConfig may be in use by a
	// running server during a call to `util.ListenAndServe`, which may
	// mutate it (due to http2.ConfigureServer). Make a copy to avoid trouble.
	tlsConfigCopy := *tlsConfig

	addr := util.CreateTestAddr("tcp")
	ln, err := util.ListenAndServe(ctx.Stopper, s, addr, &tlsConfigCopy)
	if err != nil {
		t.Fatal(err)
	}

	return s, ln
}
Example #12
func newTestServer(t *testing.T, ctx *rpc.Context, manual bool) (*rpc.Server, net.Listener) {
	var s *rpc.Server
	if manual {
		s = rpc.NewManualServer(ctx)
	} else {
		s = rpc.NewServer(ctx)
	}

	tlsConfig, err := ctx.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}

	addr := util.CreateTestAddr("tcp")
	ln, err := util.ListenAndServe(ctx.Stopper, s, addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}

	return s, ln
}
Example #13
func (lt *localRPCTransport) Listen(id roachpb.StoreID, server ServerInterface) error {
	ctx := crpc.Context{
		Context: base.Context{
			Insecure: true,
		},
		Stopper:      lt.stopper,
		DisableCache: true,
	}
	rpcServer := crpc.NewServer(&ctx)
	err := rpcServer.RegisterAsync(raftMessageName, false, /*not public*/
		func(argsI proto.Message, callback func(proto.Message, error)) {
			args := argsI.(*RaftMessageRequest)
			resp, err := server.RaftMessage(args)
			callback(resp, err)
		}, &RaftMessageRequest{})
	if err != nil {
		return err
	}

	tlsConfig, err := ctx.GetServerTLSConfig()
	if err != nil {
		return err
	}

	addr := util.CreateTestAddr("tcp")
	ln, err := util.ListenAndServe(ctx.Stopper, rpcServer, addr, tlsConfig)
	if err != nil {
		return err
	}

	lt.mu.Lock()
	if _, ok := lt.servers[id]; ok {
		log.Fatalf("node %d already listening", id)
	}
	lt.servers[id] = serverWithAddr{rpcServer, ln.Addr()}
	lt.mu.Unlock()

	return nil
}
Example #14
File: send.go Project: nieyy/cockroach
// sendOne invokes the specified RPC on the supplied client when the
// client is ready. On success, the reply is sent on the channel;
// otherwise an error is sent.
//
// Do not call directly, but instead use sendOneFn. Tests mock out this method
// via sendOneFn in order to test various error cases.
func sendOne(client batchClient, timeout time.Duration,
	rpcContext *rpc.Context, done chan batchCall) {
	addr := client.remoteAddr
	if log.V(2) {
		log.Infof("sending request to %s: %+v", addr, client.args)
	}

	// TODO(tamird/tschottdorf): pass this in from DistSender.
	ctx := context.TODO()
	if timeout != 0 {
		ctx, _ = context.WithTimeout(ctx, timeout)
	}

	if localServer := rpcContext.GetLocalInternalServerForAddr(addr); enableLocalCalls && localServer != nil {
		reply, err := localServer.Batch(ctx, &client.args)
		done <- batchCall{reply: reply, err: err}
		return
	}

	go func() {
		c := client.conn
		for state, err := c.State(); state != grpc.Ready; state, err = c.WaitForStateChange(ctx, state) {
			if err != nil {
				done <- batchCall{err: newRPCError(
					util.Errorf("rpc to %s failed: %s", addr, err))}
				return
			}
			if state == grpc.Shutdown {
				done <- batchCall{err: newRPCError(
					util.Errorf("rpc to %s failed as client connection was closed", addr))}
				return
			}
		}

		reply, err := client.client.Batch(ctx, &client.args)
		done <- batchCall{reply: reply, err: err}
	}()
}
Example #15
// sendOne invokes the specified RPC on the supplied client when the
// client is ready. On success, the reply is sent on the channel;
// otherwise an error is sent.
//
// Do not call directly, but instead use sendOneFn. Tests mock out this method
// via sendOneFn in order to test various error cases.
func sendOne(opts SendOptions, rpcContext *rpc.Context, client batchClient, done chan batchCall) {
	addr := client.remoteAddr
	if log.V(2) {
		log.Infof("sending request to %s: %+v", addr, client.args)
	}

	if localServer := rpcContext.GetLocalInternalServerForAddr(addr); enableLocalCalls && localServer != nil {
		ctx, cancel := opts.contextWithTimeout()
		defer cancel()

		reply, err := localServer.Batch(ctx, &client.args)
		done <- batchCall{reply: reply, err: err}
		return
	}

	go func() {
		ctx, cancel := opts.contextWithTimeout()
		defer cancel()

		c := client.conn
		for state, err := c.State(); state != grpc.Ready; state, err = c.WaitForStateChange(ctx, state) {
			if err != nil {
				done <- batchCall{err: newRPCError(
					util.Errorf("rpc to %s failed: %s", addr, err))}
				return
			}
			if state == grpc.Shutdown {
				done <- batchCall{err: newRPCError(
					util.Errorf("rpc to %s failed as client connection was closed", addr))}
				return
			}
		}

		reply, err := client.client.Batch(ctx, &client.args)
		done <- batchCall{reply: reply, err: err}
	}()
}
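The "use sendOneFn" note in the doc comment implies a package-level function variable; a hedged sketch of how a test might swap it to inject failures (the variable declaration and the restore pattern are assumptions):

// Assumed declaration elsewhere in the package: var sendOneFn = sendOne.
// A test can substitute a stub to exercise send's error handling without
// opening real connections.
func TestSendInjectedError(t *testing.T) {
	origSendOne := sendOneFn
	defer func() { sendOneFn = origSendOne }()

	sendOneFn = func(opts SendOptions, rpcContext *rpc.Context, client batchClient, done chan batchCall) {
		// Report a transport failure instead of issuing an RPC.
		done <- batchCall{err: newRPCError(
			util.Errorf("rpc to %s failed: injected error", client.remoteAddr))}
	}
	// ... call send(...) with a small ReplicaSlice and assert on the
	// returned SendError ...
}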
Example #16
// send sends one or more RPCs to clients specified by the slice of
// replicas. On success, send returns the first successful reply. Otherwise,
// send returns an error if and as soon as the number of failed RPCs exceeds
// the available endpoints less the number of required replies.
func send(opts SendOptions, replicas ReplicaSlice,
	args roachpb.BatchRequest, rpcContext *rpc.Context) (*roachpb.BatchResponse, error) {

	if len(replicas) < 1 {
		return nil, roachpb.NewSendError(
			fmt.Sprintf("insufficient replicas (%d) to satisfy send request of %d",
				len(replicas), 1), false)
	}

	done := make(chan batchCall, len(replicas))

	clients := make([]batchClient, 0, len(replicas))
	for _, replica := range replicas {
		conn, err := rpcContext.GRPCDial(replica.NodeDesc.Address.String())
		if err != nil {
			return nil, err
		}
		argsCopy := args
		argsCopy.Replica = replica.ReplicaDescriptor
		clients = append(clients, batchClient{
			remoteAddr: replica.NodeDesc.Address.String(),
			conn:       conn,
			client:     roachpb.NewInternalClient(conn),
			args:       argsCopy,
		})
	}

	// Put known-unhealthy clients last.
	nHealthy, err := splitHealthy(clients)
	if err != nil {
		return nil, err
	}

	var orderedClients []batchClient
	switch opts.Ordering {
	case orderStable:
		orderedClients = clients
	case orderRandom:
		// Randomly permute order, but keep known-unhealthy clients last.
		shuffleClients(clients[:nHealthy])
		shuffleClients(clients[nHealthy:])

		orderedClients = clients
	}
	// TODO(spencer): going to need to also sort by affinity; closest
	// ping time should win. Makes sense to have the rpc client/server
	// heartbeat measure ping times. With a bit of seasoning, each
	// node will be able to order the healthy replicas based on latency.

	// Send the first request.
	sendOneFn(opts, rpcContext, orderedClients[0], done)
	orderedClients = orderedClients[1:]

	var errors, retryableErrors int

	// Wait for completions.
	var sendNextTimer util.Timer
	defer sendNextTimer.Stop()
	for {
		sendNextTimer.Reset(opts.SendNextTimeout)
		select {
		case <-sendNextTimer.C:
			sendNextTimer.Read = true
			// On successive RPC timeouts, send to additional replicas if available.
			if len(orderedClients) > 0 {
				log.Trace(opts.Context, "timeout, trying next peer")
				sendOneFn(opts, rpcContext, orderedClients[0], done)
				orderedClients = orderedClients[1:]
			}

		case call := <-done:
			err := call.err
			if err == nil {
				if log.V(2) {
					log.Infof("successful reply: %+v", call.reply)
				}

				return call.reply, nil
			}

			// Error handling.
			if log.V(1) {
				log.Warningf("error reply: %s", err)
			}

			errors++

			// Since we have a reconnecting client here, disconnect errors are retryable.
			disconnected := err == io.ErrUnexpectedEOF
			if retryErr, ok := err.(retry.Retryable); disconnected || (ok && retryErr.CanRetry()) {
				retryableErrors++
			}

			if remainingNonErrorRPCs := len(replicas) - errors; remainingNonErrorRPCs < 1 {
				return nil, roachpb.NewSendError(
					fmt.Sprintf("too many errors encountered (%d of %d total): %v",
						errors, len(clients), err), remainingNonErrorRPCs+retryableErrors >= 1)
			}
			// Send to additional replicas if available.
			if len(orderedClients) > 0 {
				log.Trace(opts.Context, "error, trying next peer")
				sendOneFn(opts, rpcContext, orderedClients[0], done)
				orderedClients = orderedClients[1:]
			}
		}
	}
}
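A hedged sketch of calling send (replicas, ba, and rpcCtx are assumed to be a populated ReplicaSlice, roachpb.BatchRequest, and *rpc.Context; only fields that appear in the function above are set):

// Hypothetical call site; the timeout value is a placeholder.
opts := SendOptions{
	Ordering:        orderRandom, // or orderStable, as handled by the switch above
	SendNextTimeout: 500 * time.Millisecond,
	Context:         context.Background(),
}
br, err := send(opts, replicas, ba, rpcCtx)
if err != nil {
	// Per the doc comment, failures beyond the available endpoints surface
	// as an error built with roachpb.NewSendError.
	log.Warningf("send failed: %v", err)
	return
}
_ = br // first successful BatchResponse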
Example #17
// start dials the remote addr and commences gossip once connected. Upon exit,
// the client is sent on the disconnected channel. This method starts client
// processing in a goroutine and returns immediately.
func (c *client) start(
	g *Gossip,
	disconnected chan *client,
	rpcCtx *rpc.Context,
	stopper *stop.Stopper,
	nodeID roachpb.NodeID,
	breaker *circuit.Breaker,
) {
	stopper.RunWorker(func() {
		ctx, cancel := context.WithCancel(c.ctx)
		var wg sync.WaitGroup
		defer func() {
			// This closes the outgoing stream, causing any attempt to send or
			// receive to return an error.
			//
			// Note: it is still possible for incoming gossip to be processed after
			// this point.
			cancel()

			// The stream is closed, but there may still be some incoming gossip
			// being processed. Wait until that is complete to avoid racing the
			// client's removal against the discovery of its remote's node ID.
			wg.Wait()
			disconnected <- c
		}()

		consecFailures := breaker.ConsecFailures()
		var stream Gossip_GossipClient
		if err := breaker.Call(func() error {
			// Note: avoid using `grpc.WithBlock` here. This code is already
			// asynchronous from the caller's perspective, so the only effect of
			// `WithBlock` here is blocking shutdown - at the time of this writing,
			// that ends up making `kv` tests take twice as long.
			conn, err := rpcCtx.GRPCDial(c.addr.String())
			if err != nil {
				return err
			}
			if stream, err = NewGossipClient(conn).Gossip(ctx); err != nil {
				return err
			}
			return c.requestGossip(g, stream)
		}, 0); err != nil {
			if consecFailures == 0 {
				log.Warningf(ctx, "node %d: failed to start gossip client: %s", nodeID, err)
			}
			return
		}

		// Start gossiping.
		log.Infof(ctx, "node %d: started gossip client to %s", nodeID, c.addr)
		if err := c.gossip(ctx, g, stream, stopper, &wg); err != nil {
			if !grpcutil.IsClosedConnection(err) {
				g.mu.Lock()
				peerID := c.peerID
				g.mu.Unlock()
				if peerID != 0 {
					log.Infof(ctx, "node %d: closing client to node %d (%s): %s", nodeID, peerID, c.addr, err)
				} else {
					log.Infof(ctx, "node %d: closing client to %s: %s", nodeID, c.addr, err)
				}
			}
		}
	})
}
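Compared with the earlier versions of start, this one gates the dial behind a circuit breaker; a reduced sketch of the Call/ConsecFailures pattern in isolation (breaker, rpcCtx, addr, and ctx are assumed to be supplied by the caller, as in the example):

// Reduced form of the breaker usage above: guard a single dial attempt and
// log only on the first failure of a streak to avoid spamming the logs.
consecFailures := breaker.ConsecFailures()
if err := breaker.Call(func() error {
	conn, err := rpcCtx.GRPCDial(addr)
	if err != nil {
		return err
	}
	_ = conn // a real caller would hand the connection off here
	return nil
}, 0 /* no timeout */); err != nil {
	if consecFailures == 0 {
		log.Warningf(ctx, "failed to dial %s: %s", addr, err)
	}
}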