Example #1
// IsClosedConnection returns true if err is an error produced by gRPC on closed connections.
func IsClosedConnection(err error) bool {
	if err == context.Canceled || err == transport.ErrConnClosing || grpc.Code(err) == codes.Canceled {
		return true
	}
	if streamErr, ok := err.(transport.StreamError); ok && streamErr.Code == codes.Canceled {
		return true
	}
	return util.IsClosedConnection(err)
}
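A minimal sketch of how a caller might use this helper, under the assumption of a hypothetical receive loop: drainStream, recv and handle are illustrations, not part of the original code.

// drainStream is a hypothetical illustration of IsClosedConnection in use:
// read from a stream until it ends, and treat errors caused by a closed
// connection as a clean shutdown rather than a failure. The recv callback
// stands in for a concrete gRPC stream's Recv method.
func drainStream(recv func() (proto.Message, error), handle func(proto.Message)) error {
	for {
		msg, err := recv()
		if err == io.EOF {
			// Normal end of stream.
			return nil
		}
		if err != nil {
			if IsClosedConnection(err) {
				// The connection was torn down; nothing worth reporting.
				return nil
			}
			return err
		}
		handle(msg)
	}
}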
Example #2
// IsClosedConnection returns true if err is an error produced by gRPC on closed connections.
func IsClosedConnection(err error) bool {
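	// In addition to typed errors and codes, match on the error text: some
	// closed-connection failures surface only as a description string (the
	// "... is closing" message), with no dedicated error value or code.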
	if err == context.Canceled ||
		grpc.Code(err) == codes.Canceled ||
		grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error() ||
		strings.Contains(err.Error(), "is closing") {
		return true
	}
	if streamErr, ok := err.(transport.StreamError); ok && streamErr.Code == codes.Canceled {
		return true
	}
	return util.IsClosedConnection(err)
}
Example #3
// readRequests synchronously reads a stream of requests from a
// connection. Each request is handled in a new background goroutine;
// when the handler finishes the response is written to the responses
// channel. When the connection is closed (and any pending requests
// have finished), we close the responses channel.
func (s *Server) readRequests(conn net.Conn, codec rpc.ServerCodec, authHook func(proto.Message, bool) error, responses chan<- serverResponse) {
	var wg sync.WaitGroup
	var closed bool
	defer func() {
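		// Wait for any in-flight handlers before closing the responses
		// channel, so that a late handler never writes to a closed channel.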
		wg.Wait()
		if !closed {
			s.runCloseCallbacks(conn)
		}
		close(responses)
	}()

	for {
		req, meth, args, err := s.readRequest(codec)
		if err != nil {
			if err == io.EOF || err == io.ErrUnexpectedEOF || util.IsClosedConnection(err) {
				closed = true
				s.runCloseCallbacks(conn)
				return
			}
			log.Warningf("rpc: server cannot decode request: %s", err)
			return
		}

		if meth.handler == nil {
			responses <- serverResponse{
				req: req,
				err: util.Errorf("rpc: couldn't find method: %s", req.ServiceMethod),
			}
			continue
		}

		if err := authHook(args, meth.public); err != nil {
			responses <- serverResponse{
				req: req,
				err: err,
			}
			// We got an unauthorized request. For now, leave the connection
			// open. We may want to close it in the future for security reasons.
			continue
		}

		wg.Add(1)
		meth.handler(args, func(reply proto.Message, err error) {
			responses <- serverResponse{
				req:   req,
				reply: reply,
				err:   err,
			}
			wg.Done()
		})
	}
}
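The goroutine that drains the responses channel is not shown above. One plausible shape for it is sketched below; sendResponses, the rpc.Response wiring and the Seq field are assumptions about the surrounding code, not taken from it.

// sendResponses is a hypothetical counterpart to readRequests: it drains the
// responses channel until readRequests closes it, writing each response back
// to the client via the codec.
func sendResponses(codec rpc.ServerCodec, responses <-chan serverResponse) {
	for resp := range responses {
		rpcResp := rpc.Response{
			ServiceMethod: resp.req.ServiceMethod,
			Seq:           resp.req.Seq,
		}
		if resp.err != nil {
			rpcResp.Error = resp.err.Error()
		}
		if err := codec.WriteResponse(&rpcResp, resp.reply); err != nil {
			log.Warningf("rpc: write response failed: %s", err)
			return
		}
	}
}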
Example #4
// ListenAndServeGRPC creates a listener and serves server on it, closing
// the listener when signalled by the stopper.
func ListenAndServeGRPC(stopper *stop.Stopper, server *grpc.Server, addr net.Addr, config *tls.Config) (net.Listener, error) {
	ln, err := util.Listen(addr, config)
	if err != nil {
		return nil, err
	}

	stopper.RunWorker(func() {
		if err := server.Serve(ln); err != nil && !util.IsClosedConnection(err) {
			log.Fatal(err)
		}
	})

	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()
		if err := ln.Close(); err != nil {
			log.Fatal(err)
		}
	})

	return ln, nil
}
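A hypothetical caller might wire this up as follows; the sketch assumes that a nil *tls.Config yields an insecure listener.

// runGRPC shows one plausible way to use ListenAndServeGRPC: the stopper owns
// shutdown, so draining it is what ultimately closes the listener.
func runGRPC(stopper *stop.Stopper) error {
	addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	ln, err := ListenAndServeGRPC(stopper, grpc.NewServer(), addr, nil /* assumed: nil means no TLS */)
	if err != nil {
		return err
	}
	log.Infof("serving grpc at %s", ln.Addr())
	return nil
}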
Example #5
// Start starts the server on the specified port, starts gossip and
// initializes the node using the engines from the server's context.
func (s *Server) Start() error {
	s.initHTTP()

	tlsConfig, err := s.ctx.GetServerTLSConfig()
	if err != nil {
		return err
	}

	// The following code is a specialization of util/net.go's ListenAndServe
	// which adds pgwire support. A single port is used to serve all protocols
	// (pg, http, h2) via the following construction:
	//
	// non-TLS case:
	// net.Listen -> cmux.New -> pgwire.Match -> pgwire.Server.ServeConn
	//               |
	//               -  -> cmux.HTTP2 -> http2.(*Server).ServeConn
	//               -  -> cmux.Any -> http.(*Server).Serve
	//
	// TLS case:
	// net.Listen -> cmux.New -> pgwire.Match -> pgwire.Server.ServeConn
	//               |
	//               -  -> cmux.Any -> tls.NewListener -> http.(*Server).Serve
	//
	// Note that the difference between the TLS and non-TLS cases exists due to
	// Go's lack of an h2c (HTTP2 Clear Text) implementation. See inline comments
	// in util.ListenAndServe for an explanation of how h2c is implemented there
	// and here.

	ln, err := net.Listen("tcp", s.ctx.Addr)
	if err != nil {
		return err
	}
	unresolvedAddr, err := officialAddr(s.ctx.Addr, ln.Addr())
	if err != nil {
		return err
	}
	s.ctx.Addr = unresolvedAddr.String()

	s.stopper.RunWorker(func() {
		<-s.stopper.ShouldDrain()
		if err := ln.Close(); err != nil {
			log.Fatal(err)
		}
	})

	m := cmux.New(ln)
	pgL := m.Match(pgwire.Match)

	var serveConn func(net.Listener, func(net.Conn)) error
	if tlsConfig != nil {
		anyL := m.Match(cmux.Any())
		serveConn = util.ServeHandler(s.stopper, s, tls.NewListener(anyL, tlsConfig), tlsConfig)
	} else {
		h2L := m.Match(cmux.HTTP2())
		anyL := m.Match(cmux.Any())

		var h2 http2.Server
		serveConnOpts := &http2.ServeConnOpts{
			Handler: s,
		}
		serveH2 := func(conn net.Conn) {
			h2.ServeConn(conn, serveConnOpts)
		}

		serveConn = util.ServeHandler(s.stopper, s, anyL, tlsConfig)

		s.stopper.RunWorker(func() {
			util.FatalIfUnexpected(serveConn(h2L, serveH2))
		})
	}

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(serveConn(pgL, func(conn net.Conn) {
			if err := s.pgServer.ServeConn(conn); err != nil && !util.IsClosedConnection(err) {
				log.Error(err)
			}
		}))
	})

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(m.Serve())
	})

	s.rpcContext.SetLocalServer(s.rpc, s.ctx.Addr)

	s.gossip.Start(s.grpc, unresolvedAddr)

	if err := s.node.start(s.rpc, unresolvedAddr, s.ctx.Engines, s.ctx.NodeAttributes); err != nil {
		return err
	}

	// Begin recording runtime statistics.
	runtime := status.NewRuntimeStatRecorder(s.node.Descriptor.NodeID, s.clock)
	s.tsDB.PollSource(runtime, s.ctx.MetricsFrequency, ts.Resolution10s, s.stopper)

	// Begin recording time series data collected by the status monitor.
	s.tsDB.PollSource(s.recorder, s.ctx.MetricsFrequency, ts.Resolution10s, s.stopper)

	// Begin recording status summaries.
	s.node.startWriteSummaries(s.ctx.MetricsFrequency)

	s.sqlExecutor.SetNodeID(s.node.Descriptor.NodeID)
	// Create and start the schema change manager only after a NodeID
	// has been assigned.
	s.schemaChangeManager = sql.NewSchemaChangeManager(*s.db, s.gossip, s.leaseMgr)
	s.schemaChangeManager.Start(s.stopper)

	log.Infof("starting %s/postgres server at %s", s.ctx.HTTPRequestScheme(), unresolvedAddr)

	return nil
}
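The cmux construction described in the comment above reduces to the standalone sketch below; only the cmux calls (New, Match, Serve) and the matchers are real API, the handler parameters are placeholders.

// multiplexPort is a stripped-down sketch of the single-port pattern used in
// Start: one TCP listener, with postgres wire traffic and everything else
// routed to different handlers. httpSrv and servePg are placeholders.
func multiplexPort(ln net.Listener, httpSrv *http.Server, servePg func(net.Listener)) error {
	m := cmux.New(ln)
	pgL := m.Match(pgwire.Match) // postgres wire protocol
	anyL := m.Match(cmux.Any())  // everything else

	go func() { _ = httpSrv.Serve(anyL) }()
	go servePg(pgL)

	// Serve blocks, handing each accepted connection to the first matcher
	// that claims it.
	return m.Serve()
}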
Example #6
// Start starts the server on the specified port, starts gossip and
// initializes the node using the engines from the server's context.
func (s *Server) Start() error {
	s.initHTTP()

	tlsConfig, err := s.ctx.GetServerTLSConfig()
	if err != nil {
		return err
	}

	// The following code is a specialization of util/net.go's ListenAndServe
	// which adds pgwire support. A single port is used to serve all protocols
	// (pg, http, h2) via the following construction:
	//
	// non-TLS case:
	// net.Listen -> cmux.New
	//               |
	//               -  -> pgwire.Match -> pgwire.Server.ServeConn
	//               -  -> cmux.Any -> grpc.(*Server).Serve
	//
	// TLS case:
	// net.Listen -> cmux.New
	//               |
	//               -  -> pgwire.Match -> pgwire.Server.ServeConn
	//               -  -> cmux.Any -> grpc.(*Server).Serve
	//
	// Note that the difference between the TLS and non-TLS cases exists due to
	// Go's lack of an h2c (HTTP2 Clear Text) implementation. See inline comments
	// in util.ListenAndServe for an explanation of how h2c is implemented there
	// and here.

	ln, err := net.Listen("tcp", s.ctx.Addr)
	if err != nil {
		return err
	}
	unresolvedAddr, err := officialAddr(s.ctx.Addr, ln.Addr())
	if err != nil {
		return err
	}
	s.ctx.Addr = unresolvedAddr.String()
	s.rpcContext.SetLocalInternalServer(s.node, s.ctx.Addr)

	s.stopper.RunWorker(func() {
		<-s.stopper.ShouldDrain()
		if err := ln.Close(); err != nil {
			log.Fatal(err)
		}
	})

	m := cmux.New(ln)
	pgL := m.Match(pgwire.Match)
	anyL := m.Match(cmux.Any())

	httpLn, err := net.Listen("tcp", s.ctx.HTTPAddr)
	if err != nil {
		return err
	}
	unresolvedHTTPAddr, err := officialAddr(s.ctx.HTTPAddr, httpLn.Addr())
	if err != nil {
		return err
	}
	s.ctx.HTTPAddr = unresolvedHTTPAddr.String()

	s.stopper.RunWorker(func() {
		<-s.stopper.ShouldDrain()
		if err := httpLn.Close(); err != nil {
			log.Fatal(err)
		}
	})

	if tlsConfig != nil {
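		// TLS is enabled: split the HTTP port so that plaintext HTTP/1 requests
		// are answered with a redirect to https, while all other traffic is
		// wrapped in a TLS listener below.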
		httpMux := cmux.New(httpLn)
		clearL := httpMux.Match(cmux.HTTP1Fast())
		tlsL := httpMux.Match(cmux.Any())

		s.stopper.RunWorker(func() {
			util.FatalIfUnexpected(httpMux.Serve())
		})

		util.ServeHandler(s.stopper, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// TODO(tamird): s/308/http.StatusPermanentRedirect/ when it exists.
			http.Redirect(w, r, "https://"+r.Host+r.RequestURI, 308)
		}), clearL, tlsConfig)

		httpLn = tls.NewListener(tlsL, tlsConfig)
	}

	serveConn := util.ServeHandler(s.stopper, s, httpLn, tlsConfig)

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(s.grpc.Serve(anyL))
	})

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(serveConn(pgL, func(conn net.Conn) {
			if err := s.pgServer.ServeConn(conn); err != nil && !util.IsClosedConnection(err) {
				log.Error(err)
			}
		}))
	})

	if len(s.ctx.SocketFile) != 0 {
		// Unix socket enabled: postgres protocol only.
		unixLn, err := net.Listen("unix", s.ctx.SocketFile)
		if err != nil {
			return err
		}

		s.stopper.RunWorker(func() {
			<-s.stopper.ShouldDrain()
			if err := unixLn.Close(); err != nil {
				log.Fatal(err)
			}
		})

		s.stopper.RunWorker(func() {
			util.FatalIfUnexpected(serveConn(unixLn, func(conn net.Conn) {
				if err := s.pgServer.ServeConn(conn); err != nil && !util.IsClosedConnection(err) {
					log.Error(err)
				}
			}))
		})
	}

	s.gossip.Start(s.grpc, unresolvedAddr)

	if err := s.node.start(unresolvedAddr, s.ctx.Engines, s.ctx.NodeAttributes); err != nil {
		return err
	}

	// Begin recording runtime statistics.
	s.startSampleEnvironment(s.ctx.MetricsSampleInterval)

	// Begin recording time series data collected by the status monitor.
	s.tsDB.PollSource(s.recorder, s.ctx.MetricsSampleInterval, ts.Resolution10s, s.stopper)

	// Begin recording status summaries.
	s.node.startWriteSummaries(s.ctx.MetricsSampleInterval)

	s.sqlExecutor.SetNodeID(s.node.Descriptor.NodeID)
	// Create and start the schema change manager only after a NodeID
	// has been assigned.
	s.schemaChangeManager = sql.NewSchemaChangeManager(*s.db, s.gossip, s.leaseMgr)
	s.schemaChangeManager.Start(s.stopper)

	s.periodicallyCheckForUpdates()

	log.Infof("starting %s server at %s", s.ctx.HTTPRequestScheme(), unresolvedHTTPAddr)
	log.Infof("starting grpc/postgres server at %s", unresolvedAddr)
	if len(s.ctx.SocketFile) != 0 {
		log.Infof("starting postgres server at unix:%s", s.ctx.SocketFile)
	}

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(m.Serve())
	})

	// Register admin service. Must happen after serving starts.
	s.stopper.AddCloser(s.admin)
	RegisterAdminServer(s.grpc, s.admin)
	return s.admin.RegisterGRPCGateway(s.ctx)
}
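util.FatalIfUnexpected is used throughout these Start variants but never shown. A plausible shape for it, sketched here rather than taken from the util package, is:

// fatalIfUnexpected sketches the behavior the callers above rely on: an error
// from Serve is expected when the listener was closed during shutdown, and
// anything else is fatal. This is an assumption about util.FatalIfUnexpected,
// not its actual implementation.
func fatalIfUnexpected(err error) {
	if err != nil && !util.IsClosedConnection(err) {
		log.Fatal(err)
	}
}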
Example #7
// Start starts the server on the specified port, starts gossip and
// initializes the node using the engines from the server's context.
func (s *Server) Start() error {
	tlsConfig, err := s.ctx.GetServerTLSConfig()
	if err != nil {
		return err
	}

	httpServer := util.MakeServer(s.stopper, tlsConfig, s)
	plainRedirectServer := util.MakeServer(s.stopper, tlsConfig, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// TODO(tamird): s/308/http.StatusPermanentRedirect/ when it exists.
		http.Redirect(w, r, "https://"+r.Host+r.RequestURI, 308)
	}))

	// The following code is a specialization of util/net.go's ListenAndServe
	// which adds pgwire support. A single port is used to serve all protocols
	// (pg, http, h2) via the following construction:
	//
	// non-TLS case:
	// net.Listen -> cmux.New
	//               |
	//               -  -> pgwire.Match -> pgwire.Server.ServeConn
	//               -  -> cmux.Any -> grpc.(*Server).Serve
	//
	// TLS case:
	// net.Listen -> cmux.New
	//               |
	//               -  -> pgwire.Match -> pgwire.Server.ServeConn
	//               -  -> cmux.Any -> grpc.(*Server).Serve
	//
	// Note that the difference between the TLS and non-TLS cases exists due to
	// Go's lack of an h2c (HTTP2 Clear Text) implementation. See inline comments
	// in util.ListenAndServe for an explanation of how h2c is implemented there
	// and here.

	ln, err := net.Listen("tcp", s.ctx.Addr)
	if err != nil {
		return err
	}
	unresolvedAddr, err := officialAddr(s.ctx.Addr, ln.Addr())
	if err != nil {
		return err
	}
	s.ctx.Addr = unresolvedAddr.String()
	s.rpcContext.SetLocalInternalServer(s.node)

	s.stopper.RunWorker(func() {
		<-s.stopper.ShouldDrain()
		if err := ln.Close(); err != nil {
			log.Fatal(err)
		}
	})

	m := cmux.New(ln)
	pgL := m.Match(pgwire.Match)
	anyL := m.Match(cmux.Any())

	httpLn, err := net.Listen("tcp", s.ctx.HTTPAddr)
	if err != nil {
		return err
	}
	unresolvedHTTPAddr, err := officialAddr(s.ctx.HTTPAddr, httpLn.Addr())
	if err != nil {
		return err
	}
	s.ctx.HTTPAddr = unresolvedHTTPAddr.String()

	s.stopper.RunWorker(func() {
		<-s.stopper.ShouldDrain()
		if err := httpLn.Close(); err != nil {
			log.Fatal(err)
		}
	})

	if tlsConfig != nil {
		httpMux := cmux.New(httpLn)
		clearL := httpMux.Match(cmux.HTTP1Fast())
		tlsL := httpMux.Match(cmux.Any())

		s.stopper.RunWorker(func() {
			util.FatalIfUnexpected(httpMux.Serve())
		})

		s.stopper.RunWorker(func() {
			util.FatalIfUnexpected(plainRedirectServer.Serve(clearL))
		})

		httpLn = tls.NewListener(tlsL, tlsConfig)
	}

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(httpServer.Serve(httpLn))
	})

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(s.grpc.Serve(anyL))
	})

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(httpServer.ServeWith(pgL, func(conn net.Conn) {
			if err := s.pgServer.ServeConn(conn); err != nil && !util.IsClosedConnection(err) {
				log.Error(err)
			}
		}))
	})

	if len(s.ctx.SocketFile) != 0 {
		// Unix socket enabled: postgres protocol only.
		unixLn, err := net.Listen("unix", s.ctx.SocketFile)
		if err != nil {
			return err
		}

		s.stopper.RunWorker(func() {
			<-s.stopper.ShouldDrain()
			if err := unixLn.Close(); err != nil {
				log.Fatal(err)
			}
		})

		s.stopper.RunWorker(func() {
			util.FatalIfUnexpected(httpServer.ServeWith(unixLn, func(conn net.Conn) {
				if err := s.pgServer.ServeConn(conn); err != nil && !util.IsClosedConnection(err) {
					log.Error(err)
				}
			}))
		})
	}

	s.gossip.Start(s.grpc, unresolvedAddr)

	if err := s.node.start(unresolvedAddr, s.ctx.Engines, s.ctx.NodeAttributes); err != nil {
		return err
	}

	// Begin recording runtime statistics.
	s.startSampleEnvironment(s.ctx.MetricsSampleInterval)

	// Begin recording time series data collected by the status monitor.
	s.tsDB.PollSource(s.recorder, s.ctx.MetricsSampleInterval, ts.Resolution10s, s.stopper)

	// Begin recording status summaries.
	s.node.startWriteSummaries(s.ctx.MetricsSampleInterval)

	s.sqlExecutor.SetNodeID(s.node.Descriptor.NodeID)

	// Create and start the schema change manager only after a NodeID
	// has been assigned.
	testingKnobs := new(sql.SchemaChangeManagerTestingKnobs)
	if s.ctx.TestingKnobs.SQLSchemaChangeManager != nil {
		testingKnobs = s.ctx.TestingKnobs.SQLSchemaChangeManager.(*sql.SchemaChangeManagerTestingKnobs)
	}
	sql.NewSchemaChangeManager(testingKnobs, *s.db, s.gossip, s.leaseMgr).Start(s.stopper)

	s.periodicallyCheckForUpdates()

	log.Infof("starting %s server at %s", s.ctx.HTTPRequestScheme(), unresolvedHTTPAddr)
	log.Infof("starting grpc/postgres server at %s", unresolvedAddr)
	if len(s.ctx.SocketFile) != 0 {
		log.Infof("starting postgres server at unix:%s", s.ctx.SocketFile)
	}

	s.stopper.RunWorker(func() {
		util.FatalIfUnexpected(m.Serve())
	})

	// Initialize grpc-gateway mux and context.
	jsonpb := &util.JSONPb{
		EnumsAsInts:  true,
		EmitDefaults: true,
		Indent:       "  ",
	}
	protopb := new(util.ProtoPb)
	gwMux := gwruntime.NewServeMux(
		gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard, jsonpb),
		gwruntime.WithMarshalerOption(util.JSONContentType, jsonpb),
		gwruntime.WithMarshalerOption(util.AltJSONContentType, jsonpb),
		gwruntime.WithMarshalerOption(util.ProtoContentType, protopb),
		gwruntime.WithMarshalerOption(util.AltProtoContentType, protopb),
	)
	gwCtx, gwCancel := context.WithCancel(context.Background())
	s.stopper.AddCloser(stop.CloserFn(gwCancel))

	// Setup HTTP<->gRPC handlers.
	var opts []grpc.DialOption
	if s.ctx.Insecure {
		opts = append(opts, grpc.WithInsecure())
	} else {
		tlsConfig, err := s.ctx.GetClientTLSConfig()
		if err != nil {
			return err
		}
		opts = append(
			opts,
			// TODO(tamird): remove this timeout. It is currently necessary because
			// GRPC will not actually bail on a bad certificate error - it will just
			// retry indefinitely. See https://github.com/grpc/grpc-go/issues/622.
			grpc.WithTimeout(base.NetworkTimeout),
			grpc.WithBlock(),
			grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
		)
	}

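	// Dial the node's own gRPC address: the gateway below translates incoming
	// HTTP requests into gRPC calls over this loopback connection.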
	conn, err := s.rpcContext.GRPCDial(s.ctx.Addr, opts...)
	if err != nil {
		return util.Errorf("error constructing grpc-gateway: %s; are your certificates valid?", err)
	}

	for _, gw := range []grpcGatewayServer{&s.admin, s.status, &s.tsServer} {
		if err := gw.RegisterGateway(gwCtx, gwMux, conn); err != nil {
			return err
		}
	}

	s.mux.Handle("/", http.FileServer(uiFileSystem))

	// TODO(marc): when cookie-based authentication exists,
	// apply it for all web endpoints.
	s.mux.HandleFunc(debugEndpoint, http.HandlerFunc(handleDebug))
	s.mux.Handle(adminEndpoint, gwMux)
	s.mux.Handle(ts.URLPrefix, gwMux)
	s.mux.Handle(statusPrefix, s.status)
	s.mux.Handle(healthEndpoint, s.status)

	if err := sdnotify.Ready(); err != nil {
		log.Errorf("failed to signal readiness using systemd protocol: %s", err)
	}

	return nil
}
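The grpcGatewayServer interface used in the loop over &s.admin, s.status and &s.tsServer is not defined above; given the call site, it presumably looks roughly like the following sketch.

// grpcGatewayServer, as assumed from the RegisterGateway call above: each
// service registers its own REST handlers on the shared gateway mux, backed
// by the loopback gRPC connection.
type grpcGatewayServer interface {
	RegisterGateway(ctx context.Context, mux *gwruntime.ServeMux, conn *grpc.ClientConn) error
}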