Example #1
// newTestNode creates a new node with a specific role (manager or agent) and
// joins it to an existing cluster. If joinAddr is an empty string, a new
// cluster is initialized. It uses TestExecutor as the executor.
func newTestNode(joinAddr, joinToken string) (*testNode, error) {
	tmpDir, err := ioutil.TempDir("", "swarmkit-integration-")
	if err != nil {
		return nil, err
	}

	rAddr := "127.0.0.1:0"
	cAddr := filepath.Join(tmpDir, "control.sock")
	cfg := &node.Config{
		ListenRemoteAPI:  rAddr,
		ListenControlAPI: cAddr,
		JoinAddr:         joinAddr,
		StateDir:         tmpDir,
		Executor:         &TestExecutor{},
		JoinToken:        joinToken,
	}
	node, err := node.New(cfg)
	if err != nil {
		return nil, err
	}
	return &testNode{
		config:   cfg,
		node:     node,
		stateDir: tmpDir,
	}, nil
}
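A usage sketch of newTestNode, assuming it is called from the same test package (t is the surrounding *testing.T, ctx a context.Context, and joinToken a manager or worker token obtained elsewhere): an empty joinAddr bootstraps a fresh cluster, and a second call with the first node's remote API address joins it.

// Sketch only: bootstrap a one-node cluster, then join a second node to it.
// RemoteAPIAddr is the same accessor used by Pause in Example #2.
first, err := newTestNode("", "")
if err != nil {
	t.Fatal(err)
}
if err := first.node.Start(ctx); err != nil {
	t.Fatal(err)
}

joinAddr, err := first.node.RemoteAPIAddr()
if err != nil {
	t.Fatal(err)
}
second, err := newTestNode(joinAddr, joinToken)
if err != nil {
	t.Fatal(err)
}
if err := second.node.Start(ctx); err != nil {
	t.Fatal(err)
}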
Example #2
// Pause stops the node and creates a new swarm node while keeping all of its state.
func (n *testNode) Pause(forceNewCluster bool) error {
	rAddr, err := n.node.RemoteAPIAddr()
	if err != nil {
		rAddr = "127.0.0.1:0"
	}

	if err := n.stop(); err != nil {
		return err
	}

	cfg := n.config
	cfg.ListenRemoteAPI = rAddr
	// If JoinAddr were still set, the node would connect to that address and
	// ignore any other remotes stored in the raft directory, so clear it here.
	cfg.JoinAddr = ""
	cfg.JoinToken = ""
	cfg.ForceNewCluster = forceNewCluster

	node, err := node.New(cfg)
	if err != nil {
		return err
	}
	n.node = node
	return nil
}
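A hedged follow-up sketch: Pause only constructs the replacement node, so a test that wants to recover a cluster (for example after losing quorum) still has to start it again. tn, t, and ctx are assumed to come from the surrounding test; Start is the same method used in the other examples.

// Sketch only: force a new single-manager cluster from the surviving
// manager's raft state, then start the node that Pause created.
if err := tn.Pause(true); err != nil {
	t.Fatal(err)
}
if err := tn.node.Start(ctx); err != nil {
	t.Fatal(err)
}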
Example #3
func (n *nodeRunner) start(conf nodeStartConfig) error {
	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(n.cluster.runtimeRoot, controlSocket)
	}

	swarmnodeConfig := swarmnode.Config{
		Hostname:           n.cluster.config.Name,
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           n.cluster.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(n.cluster.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
	}
	if conf.availability != "" {
		avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
		if !ok {
			return fmt.Errorf("invalid Availability: %q", conf.availability)
		}
		swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail)
	}
	node, err := swarmnode.New(&swarmnodeConfig)
	if err != nil {
		return err
	}
	if err := node.Start(context.Background()); err != nil {
		return err
	}

	n.done = make(chan struct{})
	n.ready = make(chan struct{})
	n.swarmNode = node
	n.config = conf
	savePersistentState(n.cluster.root, conf)

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		n.handleNodeExit(node)
		cancel()
	}()

	go n.handleReadyEvent(ctx, node, n.ready)
	go n.handleControlSocketChange(ctx, node)

	return nil
}
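Callers of start learn about readiness through the channels it sets up; as a sketch, waiting on the swarm node directly could look like the snippet below (node stands for the swarmnode.Node started above, Ready is the same channel consumed in Example #5, and the 30-second bound is an illustrative assumption).

// Sketch only: block until the freshly started node reports ready or a
// timeout elapses.
select {
case <-node.Ready():
	// the node has formed or joined a cluster
case <-time.After(30 * time.Second):
	return errors.New("swarm node did not become ready in time")
}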
Example #4
// newTestNode creates a new node with a specific role (manager or agent) and
// joins it to an existing cluster. If joinAddr is an empty string, a new
// cluster is initialized. It uses TestExecutor as the executor. If lateBind is
// set, the remote API port is not bound. If rootCA is set, this root is used
// to bootstrap the node's TLS certs.
func newTestNode(joinAddr, joinToken string, lateBind bool, rootCA *ca.RootCA) (*testNode, error) {
	tmpDir, err := ioutil.TempDir("", "swarmkit-integration-")
	if err != nil {
		return nil, err
	}

	cAddr := filepath.Join(tmpDir, "control.sock")
	cfg := &node.Config{
		ListenControlAPI: cAddr,
		JoinAddr:         joinAddr,
		StateDir:         tmpDir,
		Executor:         &TestExecutor{},
		JoinToken:        joinToken,
	}
	if !lateBind {
		cfg.ListenRemoteAPI = "127.0.0.1:0"
	}
	if rootCA != nil {
		certDir := filepath.Join(tmpDir, "certificates")
		if err := os.MkdirAll(certDir, 0700); err != nil {
			return nil, err
		}
		certPaths := ca.NewConfigPaths(certDir)
		if err := ioutil.WriteFile(certPaths.RootCA.Cert, rootCA.Cert, 0644); err != nil {
			return nil, err
		}
		if err := ioutil.WriteFile(certPaths.RootCA.Key, rootCA.Key, 0600); err != nil {
			return nil, err
		}
		// Generate TLS certs for this manager for bootstrapping; otherwise the node will generate its own CA.
		_, err := rootCA.IssueAndSaveNewCertificates(ca.NewKeyReadWriter(certPaths.Node, nil, nil),
			identity.NewID(), ca.ManagerRole, identity.NewID())
		if err != nil {
			return nil, err
		}
	}

	node, err := node.New(cfg)
	if err != nil {
		return nil, err
	}
	return &testNode{
		config:   cfg,
		node:     node,
		stateDir: tmpDir,
	}, nil
}
Example #5
func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) {
	if err := c.config.Backend.IsSwarmCompatible(); err != nil {
		return nil, err
	}

	actualLocalAddr := conf.LocalAddr
	if actualLocalAddr == "" {
		// If localAddr was not specified, resolve it automatically
		// based on the route to joinAddr. localAddr can only be left
		// empty on "join".
		listenHost, _, err := net.SplitHostPort(conf.ListenAddr)
		if err != nil {
			return nil, fmt.Errorf("could not parse listen address: %v", err)
		}

		listenAddrIP := net.ParseIP(listenHost)
		if listenAddrIP == nil || !listenAddrIP.IsUnspecified() {
			actualLocalAddr = listenHost
		} else {
			if conf.RemoteAddr == "" {
				// Should never happen except with swarms created by
				// old versions that didn't save remoteAddr.
				conf.RemoteAddr = "8.8.8.8:53"
			}
			conn, err := net.Dial("udp", conf.RemoteAddr)
			if err != nil {
				return nil, fmt.Errorf("could not find local IP address: %v", err)
			}
			localHostPort := conn.LocalAddr().String()
			actualLocalAddr, _, _ = net.SplitHostPort(localHostPort)
			conn.Close()
		}
	}

	var control string
	if runtime.GOOS == "windows" {
		control = `\\.\pipe\` + controlSocket
	} else {
		control = filepath.Join(c.runtimeRoot, controlSocket)
	}

	c.node = nil
	c.cancelDelay = nil
	c.stop = false
	n, err := swarmnode.New(&swarmnode.Config{
		Hostname:           c.config.Name,
		ForceNewCluster:    conf.forceNewCluster,
		ListenControlAPI:   control,
		ListenRemoteAPI:    conf.ListenAddr,
		AdvertiseRemoteAPI: conf.AdvertiseAddr,
		JoinAddr:           conf.joinAddr,
		StateDir:           c.root,
		JoinToken:          conf.joinToken,
		Executor:           container.NewExecutor(c.config.Backend),
		HeartbeatTick:      1,
		ElectionTick:       3,
		UnlockKey:          conf.lockKey,
		AutoLockManagers:   conf.autolock,
	})

	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	if err := n.Start(ctx); err != nil {
		return nil, err
	}
	node := &node{
		Node:           n,
		done:           make(chan struct{}),
		reconnectDelay: initialReconnectDelay,
		config:         conf,
	}
	c.node = node
	c.actualLocalAddr = actualLocalAddr // not saved
	c.saveState(conf)

	c.config.Backend.SetClusterProvider(c)
	go func() {
		err := detectLockedError(n.Err(ctx))
		if err != nil {
			logrus.Errorf("cluster exited with error: %v", err)
		}
		c.Lock()
		c.node = nil
		c.err = err
		if errors.Cause(err) == ErrSwarmLocked {
			c.locked = true
			confClone := conf
			c.lastNodeConfig = &confClone
		}
		c.Unlock()
		close(node.done)
	}()

	go func() {
		select {
		case <-n.Ready():
			c.Lock()
			node.ready = true
			c.err = nil
			c.Unlock()
		case <-ctx.Done():
		}
		c.configEvent <- struct{}{}
	}()

	go func() {
		for conn := range n.ListenControlSocket(ctx) {
			c.Lock()
			if node.conn != conn {
				if conn == nil {
					node.client = nil
					node.logs = nil
				} else {
					node.client = swarmapi.NewControlClient(conn)
					node.logs = swarmapi.NewLogsClient(conn)
				}
			}
			node.conn = conn
			c.Unlock()
			c.configEvent <- struct{}{}
		}
	}()

	return node, nil
}
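The localAddr resolution above relies on a common trick: "dialing" a UDP address sends no packets, but it forces the kernel to choose an outbound route, so the connection's local address reveals which IP would be used to reach the remote. A self-contained sketch of the same technique (the outboundIP name is illustrative; the 8.8.8.8:53 fallback mirrors the code above):

package main

import (
	"fmt"
	"net"
)

// outboundIP reports the local IP the kernel would pick to reach remote.
// No traffic is sent; net.Dial on "udp" only selects a route.
func outboundIP(remote string) (string, error) {
	conn, err := net.Dial("udp", remote)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	host, _, err := net.SplitHostPort(conn.LocalAddr().String())
	if err != nil {
		return "", err
	}
	return host, nil
}

func main() {
	ip, err := outboundIP("8.8.8.8:53")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip)
}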
Example #6
				go func() {
					// set up a listener to give access to pprof, expvar, etc.
					if err := http.ListenAndServe(debugAddr, nil); err != nil {
						panic(err)
					}
				}()
			}

			n, err := node.New(&node.Config{
				Hostname:         hostname,
				ForceNewCluster:  forceNewCluster,
				ListenControlAPI: unix,
				ListenRemoteAPI:  addr,
				JoinAddr:         managerAddr,
				StateDir:         stateDir,
				JoinToken:        joinToken,
				ExternalCAs:      externalCAOpt.Value(),
				Executor:         executor,
				HeartbeatTick:    hb,
				ElectionTick:     election,
				AutoLockManagers: autolockManagers,
				UnlockKey:        unlockKey,
			})
			if err != nil {
				return err
			}

			if err := n.Start(ctx); err != nil {
				return err
			}
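The debug listener in this example only exposes pprof and expvar if their handlers are registered on http.DefaultServeMux, which the standard library does through side-effect imports. A minimal, self-contained sketch of that pattern (the 127.0.0.1:6060 address is an arbitrary example):

package main

import (
	_ "expvar" // registers /debug/vars on http.DefaultServeMux
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux
)

func main() {
	// Passing a nil handler serves http.DefaultServeMux, so the debug
	// endpoints registered by the imports above become reachable.
	log.Fatal(http.ListenAndServe("127.0.0.1:6060", nil))
}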