Example 1
func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) {
	select {
	case <-node.Ready():
		// The node came up cleanly: clear any stale startup error and
		// signal readiness to the caller.
		n.mu.Lock()
		n.err = nil
		n.mu.Unlock()
		close(ready)
	case <-ctx.Done():
	}
	// Either way, let the cluster know its configuration may have changed.
	n.cluster.configEvent <- struct{}{}
}
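A caller typically runs handleReadyEvent in its own goroutine and then waits on the ready channel. The sketch below shows that pattern under stated assumptions: waitReady and startupTimeout are invented names, it reuses the same imports the examples already rely on (context, time, errors), and it is not code from the Docker source.

// Hypothetical caller-side sketch, not from the Docker codebase: start the
// handler in a goroutine, then block until readiness, timeout, or cancel.
func waitReady(ctx context.Context, n *nodeRunner, node *swarmnode.Node) error {
	const startupTimeout = 30 * time.Second // illustrative value

	ready := make(chan struct{})
	go n.handleReadyEvent(ctx, node, ready)

	select {
	case <-ready:
		return nil // close(ready) in the handler unblocks this case
	case <-time.After(startupTimeout):
		return errors.New("timed out waiting for swarm node readiness")
	case <-ctx.Done():
		return ctx.Err()
	}
}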
Example 2
func initClusterSpec(node *swarmnode.Node, spec types.Spec) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	// Discarding the cancel func with the blank identifier leaks the context's
	// timer until it fires; always defer it.
	defer cancel()
	for conn := range node.ListenControlSocket(ctx) {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if conn != nil {
			client := swarmapi.NewControlClient(conn)
			var cluster *swarmapi.Cluster
			for i := 0; ; i++ {
				lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
				if err != nil {
					return fmt.Errorf("error on listing clusters: %v", err)
				}
				if len(lcr.Clusters) == 0 {
					if i < 10 {
						time.Sleep(200 * time.Millisecond)
						continue
					}
					return errors.New("empty list of clusters was returned")
				}
				cluster = lcr.Clusters[0]
				break
			}
			// In init, we take the initial default values from swarmkit and merge
			// any non-nil or non-zero value from spec into the GRPC spec. This
			// leaves the default values alone.
			// Note that this is different from Update(): in Update() we expect the
			// user to specify the complete spec of the cluster (as they already
			// know the existing one and know which fields to update).
			clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec)
			if err != nil {
				return fmt.Errorf("error updating cluster settings: %v", err)
			}
			_, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
				ClusterID:      cluster.ID,
				ClusterVersion: &cluster.Meta.Version,
				Spec:           &clusterSpec,
			})
			if err != nil {
				return fmt.Errorf("error updating cluster settings: %v", err)
			}
			return nil
		}
	}
	return ctx.Err()
}
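The comment above describes overlay-style merging: start from swarmkit's defaults and copy over only the fields the user actually set. The following is a minimal self-contained sketch of that idea; Settings and mergeSettings are invented for illustration and are unrelated to the real convert.MergeSwarmSpecToGRPC.

// Illustrative only: overlay non-zero user fields onto defaults, leaving
// anything the user did not set at its default value.
type Settings struct {
	TaskHistoryLimit int64
	HeartbeatPeriod  time.Duration
}

func mergeSettings(defaults, user Settings) Settings {
	merged := defaults
	if user.TaskHistoryLimit != 0 {
		merged.TaskHistoryLimit = user.TaskHistoryLimit
	}
	if user.HeartbeatPeriod != 0 {
		merged.HeartbeatPeriod = user.HeartbeatPeriod
	}
	return merged
}

An Update()-style API would instead take user as the complete new value, replacing defaults wholesale.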
Example 3
func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) {
	for conn := range node.ListenControlSocket(ctx) {
		n.mu.Lock()
		if n.grpcConn != conn {
			if conn == nil {
				// The control socket went away: drop the clients.
				n.controlClient = nil
				n.logsClient = nil
			} else {
				// A new connection is available: rebuild the clients on top of it.
				n.controlClient = swarmapi.NewControlClient(conn)
				n.logsClient = swarmapi.NewLogsClient(conn)
			}
		}
		n.grpcConn = conn
		n.mu.Unlock()
		n.cluster.configEvent <- struct{}{}
	}
}
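Since handleControlSocketChange swaps grpcConn and the clients while holding n.mu, any reader needs the same lock to see a consistent snapshot. Below is a hedged sketch of such an accessor; currentControlClient is a hypothetical method, not part of the Docker source, and it assumes the controlClient field has the swarmapi.ControlClient type returned by NewControlClient.

// Hypothetical accessor sketch: take n.mu so the read cannot interleave
// with the swap performed in handleControlSocketChange.
func (n *nodeRunner) currentControlClient() (swarmapi.ControlClient, error) {
	n.mu.Lock()
	defer n.mu.Unlock()
	if n.controlClient == nil {
		return nil, errors.New("swarm control socket is not connected")
	}
	return n.controlClient, nil
}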
Example 4
func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) {
	err := detectLockedError(node.Err(context.Background()))
	if err != nil {
		logrus.Errorf("cluster exited with error: %v", err)
	}
	n.mu.Lock()
	n.swarmNode = nil
	n.err = err
	close(n.done)
	// If the node was ever ready (the ready channel was closed), or if this
	// is not the first run, arm the reconnect watcher.
	select {
	case <-n.ready:
		n.enableReconnectWatcher()
	default:
		if n.repeatedRun {
			n.enableReconnectWatcher()
		}
	}
	n.repeatedRun = true
	n.mu.Unlock()
}
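The select with a default case in handleNodeExit is the standard non-blocking probe for whether a channel has been closed. A minimal self-contained sketch of the idiom follows; isClosed is an invented helper.

// Illustrative only: report whether a close-only signal channel has been
// closed, without ever blocking. Receiving from a closed channel succeeds
// immediately; the default case fires while the channel is still open.
func isClosed(ch <-chan struct{}) bool {
	select {
	case <-ch:
		return true
	default:
		return false
	}
}

In handleNodeExit this distinguishes a node that was ready at least once, where reconnecting is worthwhile, from one that failed on its first run.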