// updateNodes applies mergeNode to each of the given nodes and pushes the
// updated spec back to the daemon, calling success for every node that changed.
func updateNodes(dockerCli command.Cli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error {
    client := dockerCli.Client()
    ctx := context.Background()

    for _, nodeID := range nodes {
        node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
        if err != nil {
            return err
        }

        err = mergeNode(&node)
        if err != nil {
            if err == errNoRoleChange {
                continue
            }
            return err
        }
        err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec)
        if err != nil {
            return err
        }
        success(nodeID)
    }
    return nil
}
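// errNoRoleChange, referenced above and in runDemote below, is not defined in
// this excerpt. A minimal sketch of the sentinel it is presumably declared as
// (the exact message is an assumption):
var errNoRoleChange = errors.New("role was already set to requested value")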
// runUpdate pushes the merged swarm spec back to the daemon and, if autolock
// was just turned on, prints the new unlock key.
func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, opts swarmOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    var updateFlags swarm.UpdateFlags

    swarmInspect, err := client.SwarmInspect(ctx)
    if err != nil {
        return err
    }

    // Remember the autolock setting before merging flags, so we can detect
    // whether this update is the one that enabled it.
    prevAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers

    opts.mergeSwarmSpec(&swarmInspect.Spec, flags)

    curAutoLock := swarmInspect.Spec.EncryptionConfig.AutoLockManagers

    err = client.SwarmUpdate(ctx, swarmInspect.Version, swarmInspect.Spec, updateFlags)
    if err != nil {
        return err
    }

    fmt.Fprintln(dockerCli.Out(), "Swarm updated.")

    if curAutoLock && !prevAutoLock {
        unlockKeyResp, err := client.SwarmGetUnlockKey(ctx)
        if err != nil {
            return errors.Wrap(err, "could not fetch unlock key")
        }
        printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey)
    }

    return nil
}
func runList(dockerCli command.Cli, opts listOptions) error {
    client := dockerCli.Client()
    out := dockerCli.Out()
    ctx := context.Background()

    nodes, err := client.NodeList(
        ctx,
        types.NodeListOptions{Filters: opts.filter.Value()})
    if err != nil {
        return err
    }

    if len(nodes) > 0 && !opts.quiet {
        // There are nodes to print and we are not in quiet mode, so the
        // /info endpoint is needed to identify the current node.
        info, err := client.Info(ctx)
        if err != nil {
            return err
        }
        printTable(out, nodes, info)
    } else if !opts.quiet {
        // No nodes and not quiet: print only the header line with the
        // ID, HOSTNAME, ... columns.
        printTable(out, nodes, types.Info{})
    } else {
        printQuiet(out, nodes)
    }
    return nil
}
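// printQuiet is not shown in this excerpt. A minimal sketch, assuming it
// mirrors PrintQuiet for tasks and writes one node ID per line (printTable is
// omitted here because its column layout is not visible in this excerpt):
func printQuiet(out io.Writer, nodes []swarm.Node) {
    for _, node := range nodes {
        fmt.Fprintln(out, node.ID)
    }
}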
func runUnlock(dockerCli command.Cli, opts unlockOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    // First check that the node is actually part of a swarm, and that the
    // swarm is actually locked. In any other state, don't prompt for the key.
    info, err := client.Info(ctx)
    if err != nil {
        return err
    }

    switch info.Swarm.LocalNodeState {
    case swarm.LocalNodeStateInactive:
        return errors.New("this node is not part of a swarm")
    case swarm.LocalNodeStateLocked:
        break
    default:
        return errors.New("swarm is not locked")
    }

    key, err := readKey(dockerCli.In(), "Please enter unlock key: ")
    if err != nil {
        return err
    }
    req := swarm.UnlockRequest{
        UnlockKey: key,
    }

    return client.SwarmUnlock(ctx, req)
}
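// readKey is not shown in this excerpt. A plausible sketch, assuming the input
// type matches what dockerCli.In() returns and that
// golang.org/x/crypto/ssh/terminal is used to suppress echo on a TTY (both
// assumptions):
func readKey(in *command.InStream, prompt string) (string, error) {
    if in.IsTerminal() {
        // Prompt and read without echoing the key back to the terminal.
        fmt.Print(prompt)
        dt, err := terminal.ReadPassword(int(in.FD()))
        fmt.Println()
        return string(dt), err
    }
    // Non-interactive input: read a single line and trim it.
    key, err := bufio.NewReader(in).ReadString('\n')
    if err == io.EOF {
        err = nil
    }
    return strings.TrimSpace(key), err
}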
func runLeave(dockerCli command.Cli, opts leaveOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    if err := client.SwarmLeave(ctx, opts.force); err != nil {
        return err
    }

    fmt.Fprintln(dockerCli.Out(), "Node left the swarm.")
    return nil
}
// Print prints task information in a table format.
// It is also called by `docker node ps <node>` and `docker stack ps`.
func Print(dockerCli command.Cli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error {
    sort.Stable(tasksBySlot(tasks))

    writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0)

    // Ignore flushing errors.
    defer writer.Flush()
    fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR", "PORTS"}, "\t"))

    return print(writer, ctx, tasks, resolver, noTrunc)
}
// PrintQuiet shows task list in a quiet way.
func PrintQuiet(dockerCli command.Cli, tasks []swarm.Task) error {
    sort.Stable(tasksBySlot(tasks))

    out := dockerCli.Out()

    for _, task := range tasks {
        fmt.Fprintln(out, task.ID)
    }

    return nil
}
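// tasksBySlot, the sort.Interface used by Print and PrintQuiet above, is not
// defined in this excerpt. A minimal sketch, assuming tasks sort by slot and,
// within a slot, most recently created first:
type tasksBySlot []swarm.Task

func (t tasksBySlot) Len() int      { return len(t) }
func (t tasksBySlot) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t tasksBySlot) Less(i, j int) bool {
    // Sort by slot first.
    if t[i].Slot != t[j].Slot {
        return t[i].Slot < t[j].Slot
    }
    // Within the same slot, put the most recently created task first.
    return t[j].Meta.CreatedAt.Before(t[i].Meta.CreatedAt)
}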
func runDemote(dockerCli command.Cli, nodes []string) error {
    demote := func(node *swarm.Node) error {
        if node.Spec.Role == swarm.NodeRoleWorker {
            fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID)
            return errNoRoleChange
        }
        node.Spec.Role = swarm.NodeRoleWorker
        return nil
    }
    success := func(nodeID string) {
        fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID)
    }
    return updateNodes(dockerCli, nodes, demote, success)
}
func runJoinToken(dockerCli command.Cli, opts joinTokenOptions) error {
    worker := opts.role == "worker"
    manager := opts.role == "manager"

    if !worker && !manager {
        return errors.New("unknown role " + opts.role)
    }

    client := dockerCli.Client()
    ctx := context.Background()

    if opts.rotate {
        flags := swarm.UpdateFlags{
            RotateWorkerToken:  worker,
            RotateManagerToken: manager,
        }

        sw, err := client.SwarmInspect(ctx)
        if err != nil {
            return err
        }

        if err := client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil {
            return err
        }

        if !opts.quiet {
            fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", opts.role)
        }
    }

    // A second SwarmInspect is necessary here: if a rotation happened above,
    // the SwarmUpdate call changed the join tokens.
    sw, err := client.SwarmInspect(ctx)
    if err != nil {
        return err
    }

    if opts.quiet && worker {
        fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Worker)
        return nil
    }

    if opts.quiet && manager {
        fmt.Fprintln(dockerCli.Out(), sw.JoinTokens.Manager)
        return nil
    }

    info, err := client.Info(ctx)
    if err != nil {
        return err
    }

    return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager)
}
func runPs(dockerCli command.Cli, opts psOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    var (
        errs  []string
        tasks []swarm.Task
    )

    for _, nodeID := range opts.nodeIDs {
        nodeRef, err := Reference(ctx, client, nodeID)
        if err != nil {
            errs = append(errs, err.Error())
            continue
        }

        node, _, err := client.NodeInspectWithRaw(ctx, nodeRef)
        if err != nil {
            errs = append(errs, err.Error())
            continue
        }

        filter := opts.filter.Value()
        filter.Add("node", node.ID)

        nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter})
        if err != nil {
            errs = append(errs, err.Error())
            continue
        }

        tasks = append(tasks, nodeTasks...)
    }

    if err := task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc); err != nil {
        errs = append(errs, err.Error())
    }

    if len(errs) > 0 {
        return fmt.Errorf("%s", strings.Join(errs, "\n"))
    }

    return nil
}
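// Reference, used above and in runInspect below, is not defined in this
// excerpt. A minimal sketch, assuming its only job is to map the special node
// name "self" to the current node's ID via the /info endpoint:
func Reference(ctx context.Context, apiClient client.APIClient, ref string) (string, error) {
    if ref == "self" {
        info, err := apiClient.Info(ctx)
        if err != nil {
            return "", err
        }
        return info.Swarm.NodeID, nil
    }
    return ref, nil
}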
func runRemove(dockerCli command.Cli, args []string, opts removeOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    var errs []string

    for _, nodeID := range args {
        err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force})
        if err != nil {
            errs = append(errs, err.Error())
            continue
        }
        fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID)
    }

    if len(errs) > 0 {
        return fmt.Errorf("%s", strings.Join(errs, "\n"))
    }

    return nil
}
func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    req := swarm.InitRequest{
        ListenAddr:       opts.listenAddr.String(),
        AdvertiseAddr:    opts.advertiseAddr,
        ForceNewCluster:  opts.forceNewCluster,
        Spec:             opts.swarmOptions.ToSpec(flags),
        AutoLockManagers: opts.swarmOptions.autolock,
    }
    if flags.Changed(flagAvailability) {
        availability := swarm.NodeAvailability(strings.ToLower(opts.availability))
        switch availability {
        case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain:
            req.Availability = availability
        default:
            return fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
        }
    }

    nodeID, err := client.SwarmInit(ctx, req)
    if err != nil {
        if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") {
            return errors.New(err.Error() + " - specify one with --advertise-addr")
        }
        return err
    }

    fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID)

    if err := printJoinCommand(ctx, dockerCli, nodeID, false, true); err != nil {
        return err
    }

    fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n")

    if req.AutoLockManagers {
        unlockKeyResp, err := client.SwarmGetUnlockKey(ctx)
        if err != nil {
            return errors.Wrap(err, "could not fetch unlock key")
        }
        printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey)
    }

    return nil
}
func runInspect(dockerCli command.Cli, opts inspectOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    getRef := func(ref string) (interface{}, []byte, error) {
        nodeRef, err := Reference(ctx, client, ref)
        if err != nil {
            return nil, nil, err
        }
        node, _, err := client.NodeInspectWithRaw(ctx, nodeRef)
        return node, nil, err
    }

    if !opts.pretty {
        return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef)
    }
    return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef)
}
func runUnlockKey(dockerCli command.Cli, opts unlockKeyOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    if opts.rotate {
        flags := swarm.UpdateFlags{RotateManagerUnlockKey: true}

        sw, err := client.SwarmInspect(ctx)
        if err != nil {
            return err
        }

        if !sw.Spec.EncryptionConfig.AutoLockManagers {
            return errors.New("cannot rotate because autolock is not turned on")
        }

        if err := client.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil {
            return err
        }

        if !opts.quiet {
            fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n")
        }
    }

    unlockKeyResp, err := client.SwarmGetUnlockKey(ctx)
    if err != nil {
        return errors.Wrap(err, "could not fetch unlock key")
    }

    if unlockKeyResp.UnlockKey == "" {
        return errors.New("no unlock key is set")
    }

    if opts.quiet {
        fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey)
        return nil
    }

    printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey)
    return nil
}
func runJoin(dockerCli command.Cli, flags *pflag.FlagSet, opts joinOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    req := swarm.JoinRequest{
        JoinToken:     opts.token,
        ListenAddr:    opts.listenAddr.String(),
        AdvertiseAddr: opts.advertiseAddr,
        RemoteAddrs:   []string{opts.remote},
    }
    if flags.Changed(flagAvailability) {
        availability := swarm.NodeAvailability(strings.ToLower(opts.availability))
        switch availability {
        case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain:
            req.Availability = availability
        default:
            return fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
        }
    }

    err := client.SwarmJoin(ctx, req)
    if err != nil {
        return err
    }

    info, err := client.Info(ctx)
    if err != nil {
        return err
    }

    if info.Swarm.ControlAvailable {
        fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.")
    } else {
        fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.")
    }
    return nil
}
func printJoinCommand(ctx context.Context, dockerCli command.Cli, nodeID string, worker bool, manager bool) error {
    client := dockerCli.Client()

    node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
    if err != nil {
        return err
    }

    sw, err := client.SwarmInspect(ctx)
    if err != nil {
        return err
    }

    if node.ManagerStatus != nil {
        if worker {
            fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n    docker swarm join \\\n    --token %s \\\n    %s\n\n", sw.JoinTokens.Worker, node.ManagerStatus.Addr)
        }
        if manager {
            fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n    docker swarm join \\\n    --token %s \\\n    %s\n\n", sw.JoinTokens.Manager, node.ManagerStatus.Addr)
        }
    }

    return nil
}
func printUnlockCommand(ctx context.Context, dockerCli command.Cli, unlockKey string) {
    if len(unlockKey) > 0 {
        fmt.Fprintf(dockerCli.Out(), "To unlock a swarm manager after it restarts, "+
            "run the `docker swarm unlock`\ncommand and provide the following key:\n\n    %s\n\n"+
            "Please remember to store this key in a password manager, since without it you\n"+
            "will not be able to restart the manager.\n", unlockKey)
    }
}
// runUpdate applies flag changes to a single node; this is the node-level
// counterpart of the swarm-level runUpdate above.
func runUpdate(dockerCli command.Cli, flags *pflag.FlagSet, nodeID string) error {
    success := func(_ string) {
        fmt.Fprintln(dockerCli.Out(), nodeID)
    }
    return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success)
}
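// mergeNodeUpdate is not shown in this excerpt. A trimmed sketch of its likely
// shape: a closure that copies changed flags onto the node's spec before
// updateNodes pushes it back. The flag names and the set of handled flags here
// are assumptions:
func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error {
    return func(node *swarm.Node) error {
        spec := &node.Spec
        if flags.Changed("role") {
            str, err := flags.GetString("role")
            if err != nil {
                return err
            }
            spec.Role = swarm.NodeRole(str)
        }
        if flags.Changed("availability") {
            str, err := flags.GetString("availability")
            if err != nil {
                return err
            }
            spec.Availability = swarm.NodeAvailability(strings.ToLower(str))
        }
        return nil
    }
}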