func testLogClient(t *testing.T, addr string) (api.LogsClient, func()) {
	// Log client: dial the local test server without TLS. The returned
	// cleanup function closes the underlying gRPC connection.
	logCc, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("error dialing local server: %v", err)
	}
	return api.NewLogsClient(logCc), func() {
		logCc.Close()
	}
}
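// A minimal usage sketch, assuming a log broker is already serving on the
// given address and that the stream is consumed via SubscribeLogs from
// swarmkit's logbroker API (the address and request values below are
// illustrative only):
//
//	client, done := testLogClient(t, "127.0.0.1:4242")
//	defer done()
//	stream, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{})
//	if err != nil {
//		t.Fatalf("error subscribing to logs: %v", err)
//	}
//	msg, err := stream.Recv() // blocks until the broker publishes a message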
// handleControlSocketChange tracks the swarmkit node's control socket and
// swaps the cached control and logs clients whenever the connection is
// re-established or torn down. Every change is signalled on configEvent.
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
	for conn := range node.ListenControlSocket(ctx) {
		n.mu.Lock()
		if n.grpcConn != conn {
			if conn == nil {
				// Connection lost: drop the stale clients.
				n.controlClient = nil
				n.logsClient = nil
			} else {
				n.controlClient = swarmapi.NewControlClient(conn)
				n.logsClient = swarmapi.NewLogsClient(conn)
			}
		}
		n.grpcConn = conn
		n.mu.Unlock()
		n.cluster.configEvent <- struct{}{}
	}
}
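// Callers that need the control client must read it under the same lock
// that handleControlSocketChange holds while swapping it. A minimal sketch
// of such an accessor (hypothetical; not part of the original code):
//
//	func (n *nodeRunner) currentControlClient() swarmapi.ControlClient {
//		n.mu.Lock()
//		defer n.mu.Unlock()
//		return n.controlClient // nil while the socket is down
//	}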
func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) {
	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
		return nil, err
	}

	actualLocalAddr := conf.LocalAddr
	if actualLocalAddr == "" {
		// If localAddr was not specified, resolve it automatically
		// based on the route to joinAddr. localAddr can only be left
		// empty on "join".
		listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
		if err != nil {
			return nil, fmt.Errorf("could not parse listen address: %v", err)
		}

		listenAddrIP := net.ParseIP(listenHost)
		if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
			actualLocalAddr = listenHost
		} else {
			if conf.RemoteAddr == "" {
				// Should never happen except using swarms created by
				// old versions that didn't save remoteAddr.
				conf.RemoteAddr = "8.8.8.8:53"
			}
			conn, err := net.Dial("udp", conf.RemoteAddr)
			if err != nil {
				return nil, fmt.Errorf("could not find local IP address: %v", err)
			}
			localHostPort := conn.LocalAddr().String()
			actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
			conn.Close()
		}
	}

	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(c.runtimeRoot, controlSocket)
	}

	c.node = nil
	c.cancelDelay = nil
	c.stop = false
	n, err := swarmnode.New(&swarmnode.Config{
		Hostname:           c.config.Name,
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           c.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(c.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
	})
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	if err := n.Start(ctx); err != nil {
		return nil, err
	}
	node := &node{
		Node:           n,
		done:           make(chan struct{}),
		reconnectDelay: initialReconnectDelay,
		config:         conf,
	}
	c.node = node
	c.actualLocalAddr = actualLocalAddr // not saved
	c.saveState(conf)
	c.config.Backend.SetClusterProvider(c)

	go func() {
		err := detectLockedError(n.Err(ctx))
		if err != nil {
			logrus.Errorf("cluster exited with error: %v", err)
		}
		c.Lock()
		c.node = nil
		c.err = err
		if errors.Cause(err) == ErrSwarmLocked {
			c.locked = true
			confClone := conf
			c.lastNodeConfig = &confClone
		}
		c.Unlock()
		close(node.done)
	}()

	go func() {
		select {
		case <-n.Ready():
			c.Lock()
			node.ready = true
			c.err = nil
			c.Unlock()
		case <-ctx.Done():
		}
		c.configEvent <- struct{}{}
	}()

	go func() {
		for conn := range n.ListenControlSocket(ctx) {
			c.Lock()
			if node.conn != conn {
				if conn == nil {
					node.client = nil
					node.logs = nil
				} else {
					node.client = swarmapi.NewControlClient(conn)
					node.logs = swarmapi.NewLogsClient(conn)
				}
			}
			node.conn = conn
			c.Unlock()
			c.configEvent <- struct{}{}
		}
	}()

	return node, nil
}
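// A hedged sketch of how an "init" flow might drive startNewNode and wait
// for the manager to come up (the listen/advertise values and the error
// handling are illustrative, not the daemon's exact defaults):
//
//	n, err := c.startNewNode(nodeStartConfig{
//		ListenAddr:    "0.0.0.0:2377",
//		AdvertiseAddr: "192.0.2.10:2377",
//	})
//	if err != nil {
//		return err
//	}
//	select {
//	case <-n.Ready():
//		// the swarmkit node is ready to serve the control API
//	case <-n.done:
//		// the node exited before becoming ready; c.err holds the cause
//	}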