Example #1
func (n *nodeRunner) start(conf nodeStartConfig) error {
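	// Control API endpoint: a named pipe on Windows, a Unix socket under the runtime root elsewhere.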
	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
	}

	swarmnodeConfig := swarmnode.Config{
		Hostname:           n.cluster.config.Name,
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           n.cluster.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(n.cluster.config.Backend),
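		// Raft timing: heartbeats every tick, with a three-tick election timeout.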
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
	}
	if conf.availability != "" {
		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
		if !ok {
			return fmt.Errorf("invalid Availability: %q", conf.availability)
		}
		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
	}
	node, err := swarmnode.New(&swarmnodeConfig)
	if err != nil {
		return err
	}
	if err := node.Start(context.Background()); err != nil {
		return err
	}

	n.done = make(chan struct{})
	n.ready = make(chan struct{})
	n.swarmNode = node
	n.config = conf
	savePersistentState(n.cluster.root, conf)

	ctx, cancel := context.WithCancel(context.Background())

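	// Watch for node exit; cancelling the derived context stops the watcher goroutines below.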
	go func() {
		n.handleNodeExit(node)
		cancel()
	}()

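	// Watch readiness and control socket changes until the context is cancelled.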
	go n.handleReadyEvent(ctx, node, n.ready)
	go n.handleControlSocketChange(ctx, node)

	return nil
}
Example #2
func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*swarmagent.Node, context.Context, error) {
	if err := c.checkCompatibility(); err != nil {
		return nil, nil, err
	}
	c.node = nil
	c.cancelDelay = nil
	node, err := swarmagent.NewNode(&swarmagent.NodeConfig{
		Hostname:         c.config.Name,
		ForceNewCluster:  forceNewCluster,
		ListenControlAPI: filepath.Join(c.root, controlSocket),
		ListenRemoteAPI:  listenAddr,
		JoinAddr:         joinAddr,
		StateDir:         c.root,
		CAHash:           cahash,
		Secret:           secret,
		Executor:         container.NewExecutor(c.config.Backend),
		HeartbeatTick:    1,
		ElectionTick:     3,
		IsManager:        ismanager,
	})
	if err != nil {
		return nil, nil, err
	}
	ctx, cancel := context.WithCancel(context.Background())
	if err := node.Start(ctx); err != nil {
		return nil, nil, err
	}

	c.node = node
	c.listenAddr = listenAddr
	c.saveState()
	c.config.Backend.SetClusterProvider(c)
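	// Reset cluster state when the node exits, then cancel ctx to stop the other watchers.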
	go func() {
		err := node.Err(ctx)
		if err != nil {
			logrus.Errorf("cluster exited with error: %v", err)
		}
		c.Lock()
		c.conn = nil
		c.client = nil
		c.node = nil
		c.ready = false
		c.err = err
		c.Unlock()
		cancel()
	}()

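	// Mark the cluster ready once the node reports readiness, unless ctx is cancelled first.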
	go func() {
		select {
		case <-node.Ready(context.Background()):
			c.Lock()
			c.reconnectDelay = initialReconnectDelay
			c.Unlock()
		case <-ctx.Done():
		}
		if ctx.Err() == nil {
			c.Lock()
			c.ready = true
			c.err = nil
			c.Unlock()
		}
		c.configEvent <- struct{}{}
	}()

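	// Rebuild the control client whenever the control socket connection changes; a nil conn drops it.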
	go func() {
		for conn := range node.ListenControlSocket(ctx) {
			c.Lock()
			if c.conn != conn {
				if conn == nil {
					c.client = nil
				} else {
					c.client = swarmapi.NewControlClient(conn)
				}
			}
			c.conn = conn
			c.Unlock()
			c.configEvent <- struct{}{}
		}
	}()

	return node, ctx, nil
}
Example #3
func (c *Cluster) startNewNode(forceNewCluster bool, localAddr, remoteAddr, listenAddr, advertiseAddr, joinAddr, joinToken string) (*node, error) {
	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
		return nil, err
	}

	actualLocalAddr := localAddr
	if actualLocalAddr == "" {
		// If localAddr was not specified, resolve it automatically
		// based on the route to joinAddr. localAddr can only be left
		// empty on "join".
		listenHost, _, err := net.SplitHostPort(listenAddr)
		if err != nil {
			return nil, fmt.Errorf("could not parse listen address: %v", err)
		}

		listenAddrIP := net.ParseIP(listenHost)
		if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
			actualLocalAddr = listenHost
		} else {
			if remoteAddr == "" {
				// Should never happen except using swarms created by
				// old versions that didn't save remoteAddr.
				remoteAddr = "8.8.8.8:53"
			}
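			// Dialing a UDP address sends no packets; it only asks the OS
			// which local address would be used to reach remoteAddr.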
			conn, err := net.Dial("udp", remoteAddr)
			if err != nil {
				return nil, fmt.Errorf("could not find local IP address: %v", err)
			}
			localHostPort := conn.LocalAddr().String()
			actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
			conn.Close()
		}
	}

	c.node = nil
	c.cancelDelay = nil
	c.stop = false
	n, err := swarmagent.NewNode(&swarmagent.NodeConfig{
		Hostname:           c.config.Name,
		ForceNewCluster:    forceNewCluster,
		ListenControlAPI:   filepath.Join(c.root, controlSocket),
		ListenRemoteAPI:    listenAddr,
		AdvertiseRemoteAPI: advertiseAddr,
		JoinAddr:           joinAddr,
		StateDir:           c.root,
		JoinToken:          joinToken,
		Executor:           container.NewExecutor(c.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
	})
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	if err := n.Start(ctx); err != nil {
		return nil, err
	}
	node := &node{
		Node:           n,
		done:           make(chan struct{}),
		reconnectDelay: initialReconnectDelay,
	}
	c.node = node
	c.localAddr = localAddr
	c.actualLocalAddr = actualLocalAddr // not saved
	c.remoteAddr = remoteAddr
	c.listenAddr = listenAddr
	c.advertiseAddr = advertiseAddr
	c.saveState()

	c.config.Backend.SetClusterProvider(c)
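	// On node exit, clear it from the cluster, record the error, and close the done channel.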
	go func() {
		err := n.Err(ctx)
		if err != nil {
			logrus.Errorf("cluster exited with error: %v", err)
		}
		c.Lock()
		c.node = nil
		c.err = err
		c.Unlock()
		close(node.done)
	}()

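	// Flag the node ready once swarmkit reports readiness.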
	go func() {
		select {
		case <-n.Ready():
			c.Lock()
			node.ready = true
			c.err = nil
			c.Unlock()
		case <-ctx.Done():
		}
		c.configEvent <- struct{}{}
	}()

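	// Swap the control client whenever the control socket connection changes.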
	go func() {
		for conn := range n.ListenControlSocket(ctx) {
			c.Lock()
			if node.conn != conn {
				if conn == nil {
					node.client = nil
				} else {
					node.client = swarmapi.NewControlClient(conn)
				}
			}
			node.conn = conn
			c.Unlock()
			c.configEvent <- struct{}{}
		}
	}()

	return node, nil
}
Example #4
func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*node, error) {
	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
		return nil, err
	}
	c.node = nil
	c.cancelDelay = nil
	c.stop = false
	n, err := swarmagent.NewNode(&swarmagent.NodeConfig{
		Hostname:         c.config.Name,
		ForceNewCluster:  forceNewCluster,
		ListenControlAPI: filepath.Join(c.root, controlSocket),
		ListenRemoteAPI:  listenAddr,
		JoinAddr:         joinAddr,
		StateDir:         c.root,
		CAHash:           cahash,
		Secret:           secret,
		Executor:         container.NewExecutor(c.config.Backend),
		HeartbeatTick:    1,
		ElectionTick:     3,
		IsManager:        ismanager,
	})
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	if err := n.Start(ctx); err != nil {
		return nil, err
	}
	node := &node{
		Node:           n,
		done:           make(chan struct{}),
		reconnectDelay: initialReconnectDelay,
	}
	c.node = node
	c.listenAddr = listenAddr
	c.saveState()
	c.config.Backend.SetClusterProvider(c)
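	// On exit, detach the node from the cluster, record the error, and close done.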
	go func() {
		err := n.Err(ctx)
		if err != nil {
			logrus.Errorf("cluster exited with error: %v", err)
		}
		c.Lock()
		c.node = nil
		c.err = err
		c.Unlock()
		close(node.done)
	}()

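	// Mark the node ready when swarmkit signals readiness.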
	go func() {
		select {
		case <-n.Ready():
			c.Lock()
			node.ready = true
			c.err = nil
			c.Unlock()
		case <-ctx.Done():
		}
		c.configEvent <- struct{}{}
	}()

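	// Track control socket reconnects and refresh the control client accordingly.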
	go func() {
		for conn := range n.ListenControlSocket(ctx) {
			c.Lock()
			if node.conn != conn {
				if conn == nil {
					node.client = nil
				} else {
					node.client = swarmapi.NewControlClient(conn)
				}
			}
			node.conn = conn
			c.Unlock()
			c.configEvent <- struct{}{}
		}
	}()

	return node, nil
}