Example #1
// NewContext creates an rpc Context with the supplied values.
func NewContext(baseCtx *base.Context, clock *hlc.Clock, stopper *stop.Stopper) *Context {
	ctx := &Context{
		Context: baseCtx,
	}
	if clock != nil {
		ctx.localClock = clock
	} else {
		ctx.localClock = hlc.NewClock(hlc.UnixNano)
	}
	ctx.Stopper = stopper
	ctx.RemoteClocks = newRemoteClockMonitor(clock, 10*defaultHeartbeatInterval)
	ctx.HeartbeatInterval = defaultHeartbeatInterval
	ctx.HeartbeatTimeout = 2 * defaultHeartbeatInterval

	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()

		ctx.conns.Lock()
		for key, meta := range ctx.conns.cache {
			ctx.removeConn(key, meta.conn)
		}
		ctx.conns.Unlock()
	})

	return ctx
}
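
Both versions of NewContext above use the same shutdown idiom: a background goroutine registered with stopper.RunWorker blocks on stopper.ShouldDrain() and tears down the connection cache once draining begins. A minimal, standalone sketch of that idiom, assuming the stop package API as it appears in these examples (NewStopper and Stop are additional assumptions here, not shown above):

package main

import (
	"fmt"

	// Import path assumed from this era of the CockroachDB tree.
	"github.com/cockroachdb/cockroach/util/stop"
)

func main() {
	stopper := stop.NewStopper()

	// Register a worker that waits for the drain signal and then runs cleanup,
	// mirroring the connection-cache teardown in NewContext above.
	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()
		fmt.Println("draining: closing cached connections")
	})

	// Stop is assumed to signal draining and wait for registered workers to exit.
	stopper.Stop()
}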
Example #2
// NewContext creates an rpc Context with the supplied values.
func NewContext(baseCtx *base.Context, clock *hlc.Clock, stopper *stop.Stopper) *Context {
	var ctx *Context
	if baseCtx != nil {
		// TODO(tamird): This form fools `go vet`; `baseCtx` contains several
		// `sync.Mutex`s, and this dereference copies them, which is bad. The problem
		// predates this comment, so I'm kicking the can down the road for now, but
		// we should fix this.
		ctx = &Context{
			Context: *baseCtx,
		}
	} else {
		ctx = new(Context)
	}
	if clock != nil {
		ctx.localClock = clock
	} else {
		ctx.localClock = hlc.NewClock(hlc.UnixNano)
	}
	ctx.Stopper = stopper
	ctx.RemoteClocks = newRemoteClockMonitor(clock)
	ctx.HeartbeatInterval = defaultHeartbeatInterval
	ctx.HeartbeatTimeout = 2 * defaultHeartbeatInterval

	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()

		ctx.conns.Lock()
		for key, conn := range ctx.conns.cache {
			ctx.removeConn(key, conn)
		}
		ctx.conns.Unlock()
	})

	return ctx
}
Example #3
File: net.go Project: binlijin/cockroach
// ListenAndServe creates a listener and serves handler on it, closing
// the listener when signalled by the stopper.
func ListenAndServe(stopper *stop.Stopper, handler http.Handler, addr net.Addr, tlsConfig *tls.Config) (net.Listener, error) {
	ln, err := Listen(addr, tlsConfig)
	if err != nil {
		return nil, err
	}

	var mu sync.Mutex
	activeConns := make(map[net.Conn]struct{})

	httpServer := http.Server{
		TLSConfig: tlsConfig,
		Handler:   handler,
		ConnState: func(conn net.Conn, state http.ConnState) {
			mu.Lock()
			switch state {
			case http.StateNew:
				activeConns[conn] = struct{}{}
			case http.StateClosed:
				delete(activeConns, conn)
			}
			mu.Unlock()
		},
	}
	if err := http2.ConfigureServer(&httpServer, nil); err != nil {
		return nil, err
	}

	stopper.RunWorker(func() {
		if err := httpServer.Serve(ln); err != nil && !IsClosedConnection(err) {
			log.Fatal(err)
		}

		<-stopper.ShouldStop()

		mu.Lock()
		for conn := range activeConns {
			conn.Close()
		}
		mu.Unlock()
	})

	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()
		// Some unit tests manually close `ln`, so it may already be closed
		// when we get here.
		if err := ln.Close(); err != nil && !IsClosedConnection(err) {
			log.Fatal(err)
		}
	})

	return ln, nil
}
Example #4
File: net.go Project: liugangnhm/cockroach
// ListenAndServe creates a listener and serves handler on it, closing the
// listener when signalled by the stopper. The handling server implements HTTP1
// and HTTP2, with or without TLS. Note that the "real" server also implements
// the postgres wire protocol, and so does not use this function, but the
// pattern used is similar; that implementation is in server/server.go.
func ListenAndServe(stopper *stop.Stopper, handler http.Handler, addr net.Addr, tlsConfig *tls.Config) (net.Listener, error) {
	ln, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return ln, err
	}
	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()
		// Some unit tests manually close `ln`, so it may already be closed
		// when we get here.
		FatalIfUnexpected(ln.Close())
	})

	if tlsConfig != nil {
		// We're in TLS mode. ALPN will be used to automatically handle HTTP1 and
		// HTTP2 requests.
		ServeHandler(stopper, handler, tls.NewListener(ln, tlsConfig), tlsConfig)
	} else {
		// We're not in TLS mode. We're going to implement h2c (HTTP2 Clear Text)
		// ourselves.

		m := cmux.New(ln)
		// HTTP2 connections are easy to identify because they have a common
		// preface.
		h2L := m.Match(cmux.HTTP2())
		// All other connections will get the default treatment.
		anyL := m.Match(cmux.Any())

		// Construct our h2c handler function.
		var h2 http2.Server
		serveConnOpts := &http2.ServeConnOpts{
			Handler: handler,
		}
		serveH2 := func(conn net.Conn) {
			h2.ServeConn(conn, serveConnOpts)
		}

		// Start serving HTTP1 on all non-HTTP2 connections.
		serveConn := ServeHandler(stopper, handler, anyL, tlsConfig)

		// Start serving h2c on all HTTP2 connections.
		stopper.RunWorker(func() {
			FatalIfUnexpected(serveConn(h2L, serveH2))
		})

		// Finally start the multiplexing listener.
		stopper.RunWorker(func() {
			FatalIfUnexpected(m.Serve())
		})
	}
	return ln, nil
}
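
Both ListenAndServe variants are invoked the same way: the caller supplies a stopper, an http.Handler, a resolved net.Addr, and an optional *tls.Config, and gets the live listener back while the serve and shutdown workers run on the stopper. A hedged caller sketch follows; the enclosing package of ListenAndServe is not shown in these snippets, so util (matching the util.Listen call in example #7) and the import paths are assumptions:

package main

import (
	"fmt"
	"log"
	"net"
	"net/http"

	"github.com/cockroachdb/cockroach/util"      // assumed package/import path
	"github.com/cockroachdb/cockroach/util/stop" // assumed import path
)

func main() {
	stopper := stop.NewStopper()
	defer stopper.Stop()

	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	// A nil *tls.Config takes the plaintext (h2c) branch in example #4.
	ln, err := util.ListenAndServe(stopper, handler, addr, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("serving on", ln.Addr())
}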
Example #5
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
func InitSenderForLocalTestCluster(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	latency time.Duration,
	stores client.Sender,
	stopper *stop.Stopper,
	gossip *gossip.Gossip,
) client.Sender {
	var rpcSend rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (*roachpb.BatchResponse, error) {
		if latency > 0 {
			time.Sleep(latency)
		}
		sp := tracer.StartSpan("node")
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan(context.Background(), sp)
		log.Trace(ctx, args.String())
		br, pErr := stores.Send(ctx, args)
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(stores, br))
		}
		br.Error = pErr
		if pErr != nil {
			log.Trace(ctx, "error: "+pErr.String())
		}
		return br, nil
	}
	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := NewDistSender(&DistSenderContext{
		Clock: clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,                    // defined above
		RangeDescriptorDB:        stores.(RangeDescriptorDB), // for descriptor lookup
	}, gossip)

	return NewTxnCoordSender(distSender, clock, false /* !linearizable */, tracer,
		stopper, NewTxnMetrics(metric.NewRegistry()))
}
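
The test-cluster sender above sets retryOpts.Closer = stopper.ShouldDrain(), so any retry loop driven by those options gives up as soon as draining begins instead of spinning against a server that is already shutting down. The following package-agnostic sketch illustrates the Closer idea with a hand-rolled loop; it is not the project's retry implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUntilClosed keeps calling op with a fixed backoff until it succeeds or
// the closer channel is closed (the role stopper.ShouldDrain() plays above).
func retryUntilClosed(op func() error, backoff time.Duration, closer <-chan struct{}) error {
	for {
		if err := op(); err == nil {
			return nil
		}
		select {
		case <-time.After(backoff):
			// Back off, then retry.
		case <-closer:
			return errors.New("retry loop closed: draining")
		}
	}
}

func main() {
	closer := make(chan struct{})
	// Simulate the drain signal firing shortly after startup.
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(closer)
	}()

	err := retryUntilClosed(func() error {
		return errors.New("server unavailable") // always failing, as during shutdown
	}, 10*time.Millisecond, closer)
	fmt.Println(err)
}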
Example #6
File: net.go Project: GitGoldie/cockroach
// ListenAndServeGRPC creates a listener and serves the specified grpc Server
// on it, closing the listener when signalled by the stopper.
func ListenAndServeGRPC(stopper *stop.Stopper, server *grpc.Server,
	addr net.Addr) (net.Listener, error) {
	ln, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return ln, err
	}

	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()
		server.Stop()
	})

	stopper.RunWorker(func() {
		FatalIfUnexpected(server.Serve(ln))
	})
	return ln, nil
}
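
The gRPC variant in example #6 follows the same contract: register services on the *grpc.Server first, then hand the server, the stopper, and an address to ListenAndServeGRPC, which runs the Serve loop and stops the server when draining begins. A caller sketch under the same assumptions as above (package name and import paths are guesses):

package main

import (
	"fmt"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/cockroachdb/cockroach/util"      // assumed package/import path
	"github.com/cockroachdb/cockroach/util/stop" // assumed import path
)

func main() {
	stopper := stop.NewStopper()
	defer stopper.Stop()

	grpcServer := grpc.NewServer()
	// Service registrations (e.g. roachpb.RegisterInternalServer in example #10)
	// would go here, before the listener starts accepting connections.

	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	ln, err := util.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("gRPC serving on", ln.Addr())
}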
Example #7
// ListenAndServeGRPC creates a listener and serves server on it, closing
// the listener when signalled by the stopper.
func ListenAndServeGRPC(stopper *stop.Stopper, server *grpc.Server, addr net.Addr, config *tls.Config) (net.Listener, error) {
	ln, err := util.Listen(addr, config)
	if err != nil {
		return nil, err
	}

	stopper.RunWorker(func() {
		if err := server.Serve(ln); err != nil && !util.IsClosedConnection(err) {
			log.Fatal(err)
		}
	})

	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()
		if err := ln.Close(); err != nil {
			log.Fatal(err)
		}
	})

	return ln, nil
}
Example #8
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
func InitSenderForLocalTestCluster(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	latency time.Duration,
	stores client.Sender,
	stopper *stop.Stopper,
	gossip *gossip.Gossip,
) client.Sender {
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	senderTransportFactory := SenderTransportFactory(tracer, stores)
	distSender := NewDistSender(&DistSenderContext{
		Clock: clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		TransportFactory: func(
			opts SendOptions,
			rpcContext *rpc.Context,
			replicas ReplicaSlice,
			args roachpb.BatchRequest,
		) (Transport, error) {
			transport, err := senderTransportFactory(opts, rpcContext, replicas, args)
			if err != nil {
				return nil, err
			}
			return &localTestClusterTransport{transport, latency}, nil
		},
		RangeDescriptorDB: stores.(RangeDescriptorDB), // for descriptor lookup
	}, gossip)

	return NewTxnCoordSender(distSender, clock, false /* !linearizable */, tracer,
		stopper, NewTxnMetrics(metric.NewRegistry()))
}
Example #9
File: server.go Project: mrtracy/cockroach
// NewServer creates a Server from a server.Context.
func NewServer(ctx *Context, stopper *stop.Stopper) (*Server, error) {
	if ctx == nil {
		return nil, util.Errorf("ctx must not be null")
	}

	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure and --certs.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = crpc.NewContext(&ctx.Context, s.clock, stopper)
	stopper.RunWorker(func() {
		s.rpcContext.RemoteClocks.MonitorRemoteOffsets(stopper)
	})

	s.rpc = crpc.NewServer(s.rpcContext)

	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers, stopper)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)

	feed := util.NewFeed(stopper)

	// A custom RetryOptions is created which uses stopper.ShouldDrain() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	ds := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnRegistry := metric.NewRegistry()
	txnMetrics := kv.NewTxnMetrics(txnRegistry)
	sender := kv.NewTxnCoordSender(ds, s.clock, ctx.Linearizable, s.Tracer, s.stopper, txnMetrics)
	s.db = client.NewDB(sender)

	s.grpc = grpc.NewServer()
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(&s.ctx.Context, sender, stopper)
	if err := s.kvDB.RegisterRPC(s.rpc); err != nil {
		return nil, err
	}

	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)
	sqlRegistry := metric.NewRegistry()
	s.sqlExecutor = sql.NewExecutor(*s.db, s.gossip, s.leaseMgr, s.stopper, sqlRegistry)

	s.pgServer = pgwire.MakeServer(&s.ctx.Context, s.sqlExecutor, sqlRegistry)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:           s.clock,
		DB:              s.db,
		Gossip:          s.gossip,
		Transport:       s.raftTransport,
		ScanInterval:    s.ctx.ScanInterval,
		ScanMaxIdleTime: s.ctx.ScanMaxIdleTime,
		EventFeed:       feed,
		Tracer:          s.Tracer,
		StorePool:       s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
			Mode:           s.ctx.BalanceMode,
		},
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.recorder.AddNodeRegistry("sql.%s", sqlRegistry)
	s.recorder.AddNodeRegistry("txn.%s", txnRegistry)

	s.node = NewNode(nCtx, s.recorder, s.stopper, txnMetrics)
	s.admin = newAdminServer(s.db, s.stopper, s.sqlExecutor)
	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.NewServer(s.tsDB)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx)

	return s, nil
}
Example #10
File: server.go Project: mjibson/cockroach
// NewServer creates a Server from a server.Context.
func NewServer(ctx Context, stopper *stop.Stopper) (*Server, error) {
	if _, err := net.ResolveTCPAddr("tcp", ctx.Addr); err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", ctx.Addr, err)
	}

	if ctx.Insecure {
		log.Warning("running in insecure mode, this is strongly discouraged. See --insecure.")
	}
	// Try loading the TLS configs before anything else.
	if _, err := ctx.GetServerTLSConfig(); err != nil {
		return nil, err
	}
	if _, err := ctx.GetClientTLSConfig(); err != nil {
		return nil, err
	}

	s := &Server{
		Tracer:  tracing.NewTracer(),
		ctx:     ctx,
		mux:     http.NewServeMux(),
		clock:   hlc.NewClock(hlc.UnixNano),
		stopper: stopper,
	}
	s.clock.SetMaxOffset(ctx.MaxOffset)

	s.rpcContext = rpc.NewContext(ctx.Context, s.clock, stopper)
	s.rpcContext.HeartbeatCB = func() {
		if err := s.rpcContext.RemoteClocks.VerifyClockOffset(); err != nil {
			log.Fatal(err)
		}
	}

	s.gossip = gossip.New(s.rpcContext, s.ctx.GossipBootstrapResolvers, stopper)
	s.storePool = storage.NewStorePool(s.gossip, s.clock, ctx.TimeUntilStoreDead, stopper)

	// A custom RetryOptions is created which uses stopper.ShouldDrain() as
	// the Closer. This prevents infinite retry loops from occurring during
	// graceful server shutdown.
	//
	// Such a loop occurs when the DistSender attempts a connection to the
	// local server during shutdown, and receives an internal server error (HTTP
	// Code 5xx). This is the correct error for a server to return when it is
	// shutting down, and is normally retryable in a cluster environment.
	// However, on a single-node setup (such as a test), retries will never
	// succeed because the only server has been shut down; thus, the
	// DistSender needs to know that it should not retry in this situation.
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	s.distSender = kv.NewDistSender(&kv.DistSenderContext{
		Clock:           s.clock,
		RPCContext:      s.rpcContext,
		RPCRetryOptions: &retryOpts,
	}, s.gossip)
	txnRegistry := metric.NewRegistry()
	txnMetrics := kv.NewTxnMetrics(txnRegistry)
	sender := kv.NewTxnCoordSender(s.distSender, s.clock, ctx.Linearizable, s.Tracer,
		s.stopper, txnMetrics)
	s.db = client.NewDB(sender)

	s.grpc = rpc.NewServer(s.rpcContext)
	s.raftTransport = storage.NewRaftTransport(storage.GossipAddressResolver(s.gossip), s.grpc, s.rpcContext)

	s.kvDB = kv.NewDBServer(s.ctx.Context, sender, stopper)
	roachpb.RegisterExternalServer(s.grpc, s.kvDB)

	// Set up Lease Manager
	var lmKnobs sql.LeaseManagerTestingKnobs
	if ctx.TestingKnobs.SQLLeaseManager != nil {
		lmKnobs = *ctx.TestingKnobs.SQLLeaseManager.(*sql.LeaseManagerTestingKnobs)
	}
	s.leaseMgr = sql.NewLeaseManager(0, *s.db, s.clock, lmKnobs)
	s.leaseMgr.RefreshLeases(s.stopper, s.db, s.gossip)

	// Set up Executor
	eCtx := sql.ExecutorContext{
		DB:           s.db,
		Gossip:       s.gossip,
		LeaseManager: s.leaseMgr,
		Clock:        s.clock,
	}
	if ctx.TestingKnobs.SQLExecutor != nil {
		eCtx.TestingKnobs = ctx.TestingKnobs.SQLExecutor.(*sql.ExecutorTestingKnobs)
	} else {
		eCtx.TestingKnobs = &sql.ExecutorTestingKnobs{}
	}

	sqlRegistry := metric.NewRegistry()
	s.sqlExecutor = sql.NewExecutor(eCtx, s.stopper, sqlRegistry)

	s.pgServer = pgwire.MakeServer(s.ctx.Context, s.sqlExecutor, sqlRegistry)

	distSQLCtx := distsql.ServerContext{
		DB: s.db,
	}
	s.distSQLServer = distsql.NewServer(distSQLCtx)
	distsql.RegisterDistSQLServer(s.grpc, s.distSQLServer)

	// TODO(bdarnell): make StoreConfig configurable.
	nCtx := storage.StoreContext{
		Clock:                          s.clock,
		DB:                             s.db,
		Gossip:                         s.gossip,
		Transport:                      s.raftTransport,
		RaftTickInterval:               s.ctx.RaftTickInterval,
		ScanInterval:                   s.ctx.ScanInterval,
		ScanMaxIdleTime:                s.ctx.ScanMaxIdleTime,
		ConsistencyCheckInterval:       s.ctx.ConsistencyCheckInterval,
		ConsistencyCheckPanicOnFailure: s.ctx.ConsistencyCheckPanicOnFailure,
		Tracer:    s.Tracer,
		StorePool: s.storePool,
		SQLExecutor: sql.InternalExecutor{
			LeaseManager: s.leaseMgr,
		},
		LogRangeEvents: true,
		AllocatorOptions: storage.AllocatorOptions{
			AllowRebalance: true,
		},
	}
	if ctx.TestingKnobs.Store != nil {
		nCtx.TestingKnobs = *ctx.TestingKnobs.Store.(*storage.StoreTestingKnobs)
	}

	s.recorder = status.NewMetricsRecorder(s.clock)
	s.recorder.AddNodeRegistry("sql.%s", sqlRegistry)
	s.recorder.AddNodeRegistry("txn.%s", txnRegistry)
	s.recorder.AddNodeRegistry("clock-offset.%s", s.rpcContext.RemoteClocks.Registry())

	s.runtime = status.MakeRuntimeStatSampler(s.clock)
	s.recorder.AddNodeRegistry("sys.%s", s.runtime.Registry())

	s.node = NewNode(nCtx, s.recorder, s.stopper, txnMetrics, sql.MakeEventLogger(s.leaseMgr))
	roachpb.RegisterInternalServer(s.grpc, s.node)

	s.tsDB = ts.NewDB(s.db)
	s.tsServer = ts.MakeServer(s.tsDB)

	s.admin = makeAdminServer(s)
	s.status = newStatusServer(s.db, s.gossip, s.recorder, s.ctx.Context, s.rpcContext, s.node.stores)
	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		gw.RegisterService(s.grpc)
	}

	return s, nil
}