func officialAddr(unresolvedAddr string, resolvedAddr net.Addr) (*util.UnresolvedAddr, error) { unresolvedHost, unresolvedPort, err := net.SplitHostPort(unresolvedAddr) if err != nil { return nil, err } resolvedHost, resolvedPort, err := net.SplitHostPort(resolvedAddr.String()) if err != nil { return nil, err } var host string if unresolvedHost != "" { // A host was provided, use it. host = unresolvedHost } else { // A host was not provided. Ask the system, and fall back to the listener. if hostname, err := os.Hostname(); err == nil { host = hostname } else { host = resolvedHost } } var port string if unresolvedPort != "0" { // A port was provided, use it. port = unresolvedPort } else { // A port was not provided, but the system assigned one. port = resolvedPort } return util.NewUnresolvedAddr(resolvedAddr.Network(), net.JoinHostPort(host, port)), nil }
func (c *Client) dial(addr net.Addr) (net.Conn, error) { type connError struct { cn net.Conn err error } ch := make(chan connError) go func() { nc, err := net.Dial(addr.Network(), addr.String()) ch <- connError{nc, err} }() select { case ce := <-ch: return ce.cn, ce.err case <-time.After(c.netTimeout()): // Too slow. Fall through. } // Close the conn if it does end up finally coming in go func() { ce := <-ch if ce.err == nil { ce.cn.Close() } }() return nil, &ConnectTimeoutError{addr} }
func (c *Client) dial(addr net.Addr) (net.Conn, error) { nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout()) if err == nil { return nc, nil } return nil, err }
// verifyAddr starts a server listener at the specified addr and
// then dials a client to verify a connection is established.
func verifyAddr(addr net.Addr, t *testing.T) {
	ln, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		t.Error(err)
		return
	}
	// Close the listener on every exit path (it was previously leaked).
	// On failure paths this may also surface a "closed listener" error
	// from the still-pending Accept, but the test has already failed then.
	defer ln.Close()

	acceptChan := make(chan struct{})
	go func() {
		defer close(acceptChan)
		conn, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		// Close the accepted server-side connection (previously leaked).
		conn.Close()
	}()

	// Dial the address the listener actually bound (handles :0 ports).
	addr = ln.Addr()
	conn, err := net.Dial(addr.Network(), addr.String())
	if err != nil {
		t.Errorf("could not connect to %s", addr)
		return
	}
	defer conn.Close()

	select {
	case <-acceptChan:
		// success.
	case <-time.After(500 * time.Millisecond):
		t.Error("timeout waiting for client connection after 500ms")
	}
}
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
//
// NOTE(review): the engines parameter is not referenced anywhere in
// this body — presumably consumed by the caller or dead; confirm.
func createTestNode(
	addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	cfg := storage.StoreConfig{}
	stopper := stop.NewStopper()
	cfg.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
	// Long intervals so background scans/consistency checks don't fire
	// during a test.
	cfg.ScanInterval = 10 * time.Hour
	cfg.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCfg := makeTestConfig()
	cfg.Gossip = gossip.NewTest(
		0,
		nodeRPCContext,
		grpcServer,
		serverCfg.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry(),
	)
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		cfg.Gossip.SetResolvers([]resolver.Resolver{r})
		// Gossip is only started when a bootstrap address was supplied.
		cfg.Gossip.Start(ln.Addr())
	}
	retryOpts := base.DefaultRetryOptions()
	// Stop retrying once the stopper begins quiescing.
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(kv.DistSenderConfig{
		Clock:           cfg.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, cfg.Gossip)
	cfg.AmbientCtx.Tracer = tracing.NewTracer()
	sender := kv.NewTxnCoordSender(
		cfg.AmbientCtx,
		distSender,
		cfg.Clock,
		false,
		stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval),
	)
	cfg.DB = client.NewDB(sender)
	cfg.Transport = storage.NewDummyRaftTransport()
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	// ln.Addr() (not addr) is returned so callers see the bound port.
	return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
// initDescriptor initializes the node descriptor with the server // address and the node attributes. func (n *Node) initDescriptor(addr net.Addr, attrs proto.Attributes) { n.Descriptor.Address = proto.Addr{ Network: addr.Network(), Address: addr.String(), } n.Descriptor.Attrs = attrs }
func startGossipAtAddr( nodeID roachpb.NodeID, addr net.Addr, stopper *stop.Stopper, t *testing.T, registry *metric.Registry, ) *Gossip { rpcContext := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, nil, stopper) server := rpc.NewServer(rpcContext) g := NewTest(nodeID, rpcContext, server, nil, stopper, registry) ln, err := netutil.ListenAndServeGRPC(stopper, server, addr) if err != nil { t.Fatal(err) } addr = ln.Addr() if err := g.SetNodeDescriptor(&roachpb.NodeDescriptor{ NodeID: nodeID, Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()), }); err != nil { t.Fatal(err) } g.start(addr) time.Sleep(time.Millisecond) return g }
// listen and run func ListenAndServe(addr string) error { var netaddr net.Addr var err error if strings.Contains(addr, "/") { netaddr, err = net.ResolveUnixAddr("unix", addr) if err != nil { return err } } else { netaddr, err = net.ResolveTCPAddr("tcp", addr) if err != nil { return err } } // listen l, err := net.Listen(netaddr.Network(), netaddr.String()) if err != nil { return err } // same with ServeRpc http.Handle(GetRpcPath(codecName), &rpcHandler{NewServerCodec}) err = http.Serve(l, nil) return err }
// Dial connects to the address addr and returns a Message-oriented connection. func Dial(addr net.Addr) (*Conn, error) { conn, err := net.Dial(addr.Network(), addr.String()) if err != nil { return nil, err } return WrapConn(conn), nil }
func (fw *firewall) Dial(addr net.Addr) (net.Conn, error) { if fw.rule != nil && !fw.rule.Match(addr) { return nil, &net.OpError{Op: "dial", Net: addr.Network(), Addr: addr, Err: errors.New("unreachable host")} } return fw.t.Dial(addr) }
// Init returns a new RedisLimiter. // Options: // - `address` net.Addr // // @return *RedisLimiter, error func Init(address net.Addr) (*RedisLimiter, error) { rl := &RedisLimiter{ // http://godoc.org/github.com/garyburd/redigo/redis#Pool Pool: &redis.Pool{ MaxIdle: MaxIdle, IdleTimeout: IdleTimeout, TestOnBorrow: func(c redis.Conn, t time.Time) error { _, err := c.Do("PING") return err }, Dial: func() (redis.Conn, error) { c, err := redis.Dial(address.Network(), address.String()) if err != nil { return nil, err } return c, err }, }, PrefixQuota: "RateLimit:Quota:", PrefixRemaining: "RateLimit:Remaining:", PrefixReset: "RateLimit:Reset:", Duration: LimitInterval, Quota: MaxQuota, } _, err := rl.ping() return rl, err }
// start initializes the infostore with the rpc server address and
// then begins processing connecting clients in an infinite select
// loop via goroutine. Periodically, clients connected and awaiting
// the next round of gossip are awoken via the conditional variable.
func (s *server) start(addr net.Addr) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.mu.is.NodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())

	// broadcast wakes every waiter by closing the shared ready channel
	// and installing a fresh one in its place. It takes s.mu itself, so
	// it must only run after start's deferred Unlock has fired.
	// NOTE(review): registerCallback below is invoked while start still
	// holds s.mu — this assumes the callback is not fired synchronously
	// during registration, or broadcast would deadlock; confirm.
	broadcast := func() {
		// Close the old ready and open a new one. This will broadcast to all
		// receivers and setup a fresh channel to replace the closed one.
		s.mu.Lock()
		defer s.mu.Unlock()
		ready := make(chan struct{})
		close(s.mu.ready)
		s.mu.ready = ready
	}
	// Wake waiters on any infostore change (".*" matches every key).
	unregister := s.mu.is.registerCallback(".*", func(_ string, _ roachpb.Value) {
		broadcast()
	})

	s.stopper.RunWorker(func() {
		<-s.stopper.ShouldQuiesce()
		// On shutdown: stop receiving callbacks, then wake all waiters
		// one last time so they can observe quiescence.
		s.mu.Lock()
		unregister()
		s.mu.Unlock()
		broadcast()
	})
}
// NewAddr wraps addr in an *Addr, capturing its String() and Network()
// values at construction time.
func NewAddr(addr net.Addr) *Addr {
	a := &Addr{Addr: addr}
	a.s = addr.String()
	a.n = addr.Network()
	return a
}
//从一个net.Listener里面读取需要Dial的地址(测试用的比较多) func MustGetLocalAddrFromAddr(addr net.Addr) string { tcpAddr, err := net.ResolveTCPAddr(addr.Network(), addr.String()) if err != nil { panic(err) } return "127.0.0.1:" + strconv.Itoa(tcpAddr.Port) }
// start initializes the infostore with the rpc server address and
// then begins processing connecting clients in an infinite select
// loop via goroutine. Periodically, clients connected and awaiting
// the next round of gossip are awoken via the conditional variable.
func (s *server) start(grpcServer *grpc.Server, addr net.Addr) {
	s.mu.Lock()
	s.is.NodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	s.mu.Unlock()
	RegisterGossipServer(grpcServer, s)

	// broadcast wakes every waiter by closing the shared ready channel
	// and installing a fresh one under the lock. Unlike the NodeAddr
	// write above, it is called with s.mu released.
	broadcast := func() {
		ready := make(chan struct{})
		s.mu.Lock()
		close(s.ready)
		s.ready = ready
		s.mu.Unlock()
	}
	// Wake waiters on any infostore change (".*" matches every key).
	unregister := s.is.registerCallback(".*", func(_ string, _ roachpb.Value) {
		broadcast()
	})

	s.stopper.RunWorker(func() {
		<-s.stopper.ShouldDrain()
		// On drain: stop receiving callbacks, then wake all waiters one
		// last time so they can observe shutdown.
		s.mu.Lock()
		unregister()
		s.mu.Unlock()
		broadcast()
	})
}
// Listen delegates to `net.Listen` and, when tlsConfig is non-nil,
// wraps the result with `tls.NewListener` so accepted connections
// speak TLS. (The original doc claimed the returned listener's Addr()
// keeps the hostname unresolved — presumably a property of the
// caller's addr; confirm before relying on it.)
func Listen(addr net.Addr, tlsConfig *tls.Config) (net.Listener, error) {
	ln, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return ln, err
	}
	if tlsConfig != nil {
		ln = tls.NewListener(ln, tlsConfig)
	}
	return ln, nil
}
func NewPlugin(name string, registrar net.Addr) *Plugin { cmd := exec.Command(name, fmt.Sprintf("--network=%s", registrar.Network()), fmt.Sprintf("--path=%s", registrar.String())) cmd.Stdout = os.Stdout cmd.Stderr = os.Stdout return &Plugin{name: name, command: cmd} }
// NewClient returns a client RPC stub for the specified address
// (usually a TCP host:port, but for testing may be a unix domain
// socket). The process-wide client RPC cache is consulted first; if
// the requested client is not present, it's created and the cache is
// updated. Specify opts to fine tune client connection behavior or
// nil to use defaults (i.e. indefinite retries with exponential
// backoff).
//
// The Client.Ready channel is closed after the client has connected
// and completed one successful heartbeat. The Closed channel is
// closed if the client fails to connect or if the client's Close()
// method is invoked.
func NewClient(addr net.Addr, opts *util.RetryOptions) *Client {
	// Fast path: return the cached client for this address if present.
	clientMu.Lock()
	if c, ok := clients[addr.String()]; ok {
		clientMu.Unlock()
		return c
	}
	c := &Client{
		addr:   addr,
		Ready:  make(chan struct{}),
		Closed: make(chan struct{}),
	}
	// Cache before the connection exists so concurrent callers share
	// this client while it is still dialing.
	clients[c.Addr().String()] = c
	clientMu.Unlock()

	// Attempt to dial connection.
	retryOpts := clientRetryOptions
	if opts != nil {
		retryOpts = *opts
	}
	retryOpts.Tag = fmt.Sprintf("client %s connection", addr)
	go func() {
		err := util.RetryWithBackoff(retryOpts, func() (bool, error) {
			// TODO(spencer): use crypto.tls.
			conn, err := net.Dial(addr.Network(), addr.String())
			if err != nil {
				// Dial failure: log and retry (false = not done yet).
				log.Info(err)
				return false, nil
			}
			c.mu.Lock()
			c.Client = rpc.NewClient(conn)
			c.lAddr = conn.LocalAddr()
			c.mu.Unlock()
			// Ensure at least one heartbeat succeeds before exiting the
			// retry loop.
			if err = c.heartbeat(); err != nil {
				// Heartbeat failure closes the client and — presumably —
				// aborts the retry loop by returning a non-nil error;
				// confirm against RetryWithBackoff's contract.
				c.Close()
				return false, err
			}
			// Signal client is ready by closing Ready channel.
			log.Infof("client %s connected", addr)
			close(c.Ready)
			// Launch periodic heartbeat.
			go c.startHeartbeat()
			return true, nil
		})
		if err != nil {
			log.Errorf("client %s failed to connect", addr)
			c.Close()
		}
	}()
	return c
}
// Return open an client connection and session, return the session. func newClient(t *testing.T, cont Container, addr net.Addr) Session { conn, err := net.Dial(addr.Network(), addr.String()) fatalIf(t, err) c, err := cont.Connection(conn) fatalIf(t, err) sn, err := c.Session() fatalIf(t, err) return sn }
// GossipNode gossips the node's address, which is necessary before // any messages can be sent to it. Normally done automatically by // AddNode. func (rttc *raftTransportTestContext) GossipNode(nodeID roachpb.NodeID, addr net.Addr) { if err := rttc.gossip.AddInfoProto(gossip.MakeNodeIDKey(nodeID), &roachpb.NodeDescriptor{ Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()), }, time.Hour); err != nil { rttc.t.Fatal(err) } }
// Listen announces on address addr and listens for connections. func Listen(addr net.Addr) (*Listener, error) { listener, err := net.Listen(addr.Network(), addr.String()) if err != nil { return nil, err } l := new(Listener) l.Addr = addr l.listener = listener return l, err }
func (cm *CodecMap) FromNetAddr(a net.Addr) (ma.Multiaddr, error) { if a == nil { return nil, fmt.Errorf("nil multiaddr") } p, err := cm.getAddrParser(a.Network()) if err != nil { return nil, err } return p(a) }
// safeDial connects an RPC client to addr, failing the test on error,
// and returns the client along with a cleanup func that closes it
// (also failing the test if Close errors).
func safeDial(t *testing.T, addr net.Addr) (*rpc.Client, func()) {
	client, err := rpc.Dial(addr.Network(), addr.String())
	if err != nil {
		t.Fatal(err)
	}
	cleanup := func() {
		if cerr := client.Close(); cerr != nil {
			t.Fatal(cerr)
		}
	}
	return client, cleanup
}
// ListenAndServe creates a listener and serves handler on it, closing
// the listener when signalled by the stopper.
func ListenAndServe(stopper *stop.Stopper, handler http.Handler, addr net.Addr, config *tls.Config) (net.Listener, error) {
	ln, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return nil, err
	}
	newAddr, err := updatedAddr(addr, ln.Addr())
	if err != nil {
		return nil, err
	}
	if config != nil {
		// TLS mode: wrap the raw listener before serving.
		ln = tls.NewListener(ln, config)
	}

	// Worker 1: run the HTTP server, tracking live connections via
	// ConnState so they can be force-closed once Serve returns.
	stopper.RunWorker(func() {
		var mu sync.Mutex
		activeConns := make(map[net.Conn]struct{})
		httpServer := http.Server{
			Handler: handler,
			ConnState: func(conn net.Conn, state http.ConnState) {
				mu.Lock()
				switch state {
				case http.StateNew:
					activeConns[conn] = struct{}{}
				case http.StateClosed:
					delete(activeConns, conn)
				}
				mu.Unlock()
			},
		}
		// Serve blocks until the listener is closed; a closed-connection
		// error is the expected shutdown signal, anything else is fatal.
		if err := httpServer.Serve(ln); err != nil && !IsClosedConnection(err) {
			log.Fatal(err)
		}
		// Tear down any connections still open after Serve returns.
		mu.Lock()
		for conn := range activeConns {
			conn.Close()
		}
		mu.Unlock()
	})

	// Worker 2: close the listener (unblocking Serve above) when the
	// stopper fires.
	stopper.RunWorker(func() {
		<-stopper.ShouldStop()
		// Some unit tests manually close `ln`, so it may already be closed
		// when we get here.
		if err := ln.Close(); err != nil && !IsClosedConnection(err) {
			log.Fatal(err)
		}
	})

	// Return a wrapper reporting newAddr (the updated address) rather
	// than the raw bound address.
	return listener{newAddr, ln}, nil
}
// Return open an client connection and session, return the session. func newClient(cont Container, addr net.Addr) Session { conn, err := net.Dial(addr.Network(), addr.String()) panicIf(err) c, err := cont.NewConnection(conn) panicIf(err) c.Open() sn, err := c.NewSession() panicIf(err) panicIf(sn.Open()) return sn }
func (c *RegistrarClient) Register(name, service string, endpoint net.Addr) (string, error) { args := Info{ Name: name, Network: endpoint.Network(), Path: endpoint.String(), Service: service, } cookie := "" err := c.rpcClient.Call("Registrar.RegisterPlugin", args, &cookie) return cookie, err }
func AddrToString(addr net.Addr) string { if addr == nil { return "-" } else { c := addr.String() if len(c) == 0 { return addr.Network() } else { return addr.Network() + "://" + c } } }
// NewClient returns a client RPC stub for the specified address
// (usually a TCP host:port, but for testing may be a unix domain
// socket). The process-wide client RPC cache is consulted first; if
// the requested client is not present, it's created and the cache is
// updated. Specify opts to fine tune client connection behavior or
// nil to use defaults (i.e. indefinite retries with exponential
// backoff).
//
// The Closed channel is closed if the client's Close() method is
// invoked.
func NewClient(addr net.Addr, context *Context) *Client {
	clientMu.Lock()
	defer clientMu.Unlock()
	unresolvedAddr := util.MakeUnresolvedAddr(addr.Network(), addr.String())
	// Cache key includes the user so distinct users get distinct clients.
	key := fmt.Sprintf("%s@%s", context.User, unresolvedAddr)

	if !context.DisableCache {
		if c, ok := clients[key]; ok {
			return c
		}
	}

	tlsConfig, err := context.GetClientTLSConfig()
	if err != nil {
		log.Fatal(err)
	}

	c := &Client{
		closer:            make(chan struct{}),
		Closed:            make(chan struct{}),
		key:               key,
		addr:              unresolvedAddr,
		tlsConfig:         tlsConfig,
		disableReconnects: context.DisableReconnects,
		clock:             context.localClock,
		remoteClocks:      context.RemoteClocks,
	}
	// healthy holds a channel value; stored atomically — presumably
	// swapped/closed by the heartbeat loop to signal health; confirm.
	c.healthy.Store(make(chan struct{}))
	c.healthWaitTime = time.Now().Add(context.HealthWait)
	c.healthReceived = make(chan struct{})

	if !context.DisableCache {
		clients[key] = c
	}

	retryOpts := clientRetryOptions
	// Stop retrying when the stopper begins stopping.
	retryOpts.Closer = context.Stopper.ShouldStop()
	context.Stopper.RunWorker(func() {
		// runHeartbeat blocks for the life of the client; afterwards,
		// signal Closed and tear down the underlying connection if any.
		c.runHeartbeat(retryOpts)
		close(c.Closed)
		if conn := c.internalConn(); conn != nil {
			conn.client.Close()
		}
	})
	return c
}
// ListenAndServe creates a listener and serves handler on it, closing the
// listener when signalled by the stopper. The handling server implements HTTP1
// and HTTP2, with or without TLS. Note that the "real" server also implements
// the postgres wire protocol, and so does not use this function, but the
// pattern used is similar; that implementation is in server/server.go.
func ListenAndServe(stopper *stop.Stopper, handler http.Handler, addr net.Addr, tlsConfig *tls.Config) (net.Listener, error) {
	ln, err := net.Listen(addr.Network(), addr.String())
	if err != nil {
		return ln, err
	}

	// Close the listener on drain, unblocking the serve loops below.
	stopper.RunWorker(func() {
		<-stopper.ShouldDrain()
		// Some unit tests manually close `ln`, so it may already be closed
		// when we get here.
		FatalIfUnexpected(ln.Close())
	})

	if tlsConfig != nil {
		// We're in TLS mode. ALPN will be used to automatically handle HTTP1 and
		// HTTP2 requests.
		ServeHandler(stopper, handler, tls.NewListener(ln, tlsConfig), tlsConfig)
	} else {
		// We're not in TLS mode. We're going to implement h2c (HTTP2 Clear Text)
		// ourselves.
		m := cmux.New(ln)
		// HTTP2 connections are easy to identify because they have a common
		// preface.
		h2L := m.Match(cmux.HTTP2())
		// All other connections will get the default treatment.
		anyL := m.Match(cmux.Any())

		// Construct our h2c handler function.
		var h2 http2.Server
		serveConnOpts := &http2.ServeConnOpts{
			Handler: handler,
		}
		serveH2 := func(conn net.Conn) {
			h2.ServeConn(conn, serveConnOpts)
		}

		// Start serving HTTP1 on all non-HTTP2 connections.
		serveConn := ServeHandler(stopper, handler, anyL, tlsConfig)

		// Start serving h2c on all HTTP2 connections.
		stopper.RunWorker(func() {
			FatalIfUnexpected(serveConn(h2L, serveH2))
		})

		// Finally start the multiplexing listener.
		stopper.RunWorker(func() {
			FatalIfUnexpected(m.Serve())
		})
	}
	// The raw listener is returned; callers see the bound address via
	// ln.Addr().
	return ln, nil
}
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
//
// NOTE(review): the engines parameter is not referenced anywhere in
// this body — presumably consumed by the caller or dead; confirm.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}
	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	// Long interval so background scans don't fire during a test.
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(nodeRPCContext)
	grpcServer := grpc.NewServer()
	tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	// Serve gRPC and the legacy rpc server on the same listener.
	ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers, stopper)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		// Gossip is only started when a bootstrap address was supplied.
		g.Start(grpcServer, ln.Addr())
	}
	ctx.Gossip = g
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	// Stop retrying once the stopper begins draining.
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	tracer := tracing.NewTracer()
	sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed.
	// (or attach LocalRPCTransport.Close to the stopper)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	ctx.Tracer = tracer
	node := NewNode(ctx, metric.NewRegistry(), stopper, nil)
	// ln.Addr() (not addr) is returned so callers see the bound port.
	return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}