func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) {
	if flags.Changed(flagPublishAdd) {
		values := flags.Lookup(flagPublishAdd).Value.(*opts.ListOpts).GetAll()

		ports, portBindings, _ := nat.ParsePortSpecs(values)
		for port := range ports {
			*portConfig = append(*portConfig, convertPortToPortConfig(port, portBindings)...)
		}
	}

	if !flags.Changed(flagPublishRemove) {
		return
	}

	toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.ListOpts).GetAll()
	newPorts := []swarm.PortConfig{}
portLoop:
	for _, port := range *portConfig {
		for _, rawTargetPort := range toRemove {
			targetPort := nat.Port(rawTargetPort)
			if equalPort(targetPort, port) {
				continue portLoop
			}
		}
		newPorts = append(newPorts, port)
	}
	*portConfig = newPorts
}
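// equalPort is used above but not defined in this snippet; a minimal sketch,
// assuming removal matches on protocol and target port only (published port
// and host IP are ignored):
func equalPort(targetPort nat.Port, port swarm.PortConfig) bool {
	return string(port.Protocol) == targetPort.Proto() &&
		port.TargetPort == uint32(targetPort.Int())
}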
// TODO: should this override by destination path, or does swarm handle that?
func updateMounts(flags *pflag.FlagSet, mounts *[]swarm.Mount) {
	if !flags.Changed(flagMount) {
		return
	}

	*mounts = flags.Lookup(flagMount).Value.(*MountOpt).Value()
}
// ParseLogDriverFlags parses a silly string format for log driver and options.
// It returns a fully baked log driver config.
//
// If no log driver was specified, nil, nil is returned.
func ParseLogDriverFlags(flags *pflag.FlagSet) (*api.Driver, error) {
	if !flags.Changed("log-driver") {
		return nil, nil
	}

	name, err := flags.GetString("log-driver")
	if err != nil {
		return nil, err
	}

	var opts map[string]string
	if flags.Changed("log-opt") {
		rawOpts, err := flags.GetStringSlice("log-opt")
		if err != nil {
			return nil, err
		}

		opts = make(map[string]string, len(rawOpts))
		for _, rawOpt := range rawOpts {
			parts := strings.SplitN(rawOpt, "=", 2)
			if len(parts) == 1 {
				opts[parts[0]] = ""
				continue
			}
			opts[parts[0]] = parts[1]
		}
	}

	return &api.Driver{
		Name:    name,
		Options: opts,
	}, nil
}
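// A hypothetical usage sketch: "log-driver" and "log-opt" are the flag names
// the function reads; the flag set wiring and sample values here are assumed.
// Note that a bare "log-opt" key ("labels") maps to an empty string value.
func exampleLogDriver() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	flags.String("log-driver", "", "logging driver")
	flags.StringSlice("log-opt", nil, "logging driver options")
	_ = flags.Parse([]string{"--log-driver=json-file", "--log-opt=max-size=10m", "--log-opt=labels"})

	driver, err := ParseLogDriverFlags(flags)
	// driver.Name == "json-file"
	// driver.Options == map[string]string{"max-size": "10m", "labels": ""}
	_, _ = driver, err
}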
func parseUpdate(flags *pflag.FlagSet, spec *api.ServiceSpec) error {
	if flags.Changed("update-parallelism") {
		parallelism, err := flags.GetUint64("update-parallelism")
		if err != nil {
			return err
		}
		if spec.Update == nil {
			spec.Update = &api.UpdateConfig{}
		}
		spec.Update.Parallelism = parallelism
	}

	if flags.Changed("update-delay") {
		delay, err := flags.GetString("update-delay")
		if err != nil {
			return err
		}

		delayDuration, err := time.ParseDuration(delay)
		if err != nil {
			return err
		}

		if spec.Update == nil {
			spec.Update = &api.UpdateConfig{}
		}
		spec.Update.Delay = *ptypes.DurationProto(delayDuration)
	}

	return nil
}
func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error {
	mountsByTarget := map[string]mounttypes.Mount{}

	if flags.Changed(flagMountAdd) {
		values := flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value()
		for _, mount := range values {
			if _, ok := mountsByTarget[mount.Target]; ok {
				return fmt.Errorf("duplicate mount target")
			}
			mountsByTarget[mount.Target] = mount
		}
	}

	// Add the old list of mount points minus the updated ones.
	for _, mount := range *mounts {
		if _, ok := mountsByTarget[mount.Target]; !ok {
			mountsByTarget[mount.Target] = mount
		}
	}

	newMounts := []mounttypes.Mount{}

	toRemove := buildToRemoveSet(flags, flagMountRemove)
	for _, mount := range mountsByTarget {
		if _, exists := toRemove[mount.Target]; !exists {
			newMounts = append(newMounts, mount)
		}
	}

	sort.Sort(byMountSource(newMounts))

	*mounts = newMounts
	return nil
}
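// byMountSource is used above but not included here; a minimal sort.Interface
// sketch, assuming ordering by Source, then Target, purely to keep the mount
// list deterministic (map iteration order is random):
type byMountSource []mounttypes.Mount

func (m byMountSource) Len() int      { return len(m) }
func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
func (m byMountSource) Less(i, j int) bool {
	a, b := m[i], m[j]
	if a.Source == b.Source {
		return a.Target < b.Target
	}
	return a.Source < b.Source
}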
// parseBind only supports a very simple version of bind for testing the most
// basic of data flows. Replace with a --mount flag, similar to what we have in
// docker service.
func parseBind(flags *pflag.FlagSet, spec *api.ServiceSpec) error {
	if flags.Changed("bind") {
		binds, err := flags.GetStringSlice("bind")
		if err != nil {
			return err
		}

		container := spec.Task.GetContainer()
		for _, bind := range binds {
			parts := strings.SplitN(bind, ":", 2)
			if len(parts) != 2 {
				return fmt.Errorf("bind format %q not supported", bind)
			}
			container.Mounts = append(container.Mounts, api.Mount{
				Type:     api.MountTypeBind,
				Source:   parts[0],
				Target:   parts[1],
				Writable: true,
			})
		}
	}
	return nil
}
func parsePorts(flags *pflag.FlagSet, spec *api.ServiceSpec) error {
	if !flags.Changed("ports") {
		return nil
	}
	portConfigs, err := flags.GetStringSlice("ports")
	if err != nil {
		return err
	}

	ports := []*api.PortConfig{}
	for _, portConfig := range portConfigs {
		name, protocol, port, swarmPort, err := parsePortConfig(portConfig)
		if err != nil {
			return err
		}

		ports = append(ports, &api.PortConfig{
			Name:          name,
			Protocol:      protocol,
			TargetPort:    port,
			PublishedPort: swarmPort,
			// In swarmctl all ports are by default PublishModeHost.
			PublishMode: api.PublishModeHost,
		})
	}

	spec.Endpoint = &api.EndpointSpec{
		Ports: ports,
	}
	return nil
}
func updateEnvironment(flags *pflag.FlagSet, field *[]string) {
	if flags.Changed(flagEnvAdd) {
		value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts)
		*field = append(*field, value.GetAll()...)
	}
	toRemove := buildToRemoveSet(flags, flagEnvRemove)
	*field = removeItems(*field, toRemove, envKey)
}
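// envKey and removeItems are referenced above but not included here; minimal
// sketches, assuming removal keys on the variable name before the first "=",
// so "--env-rm FOO" drops "FOO=bar" from the list:
func envKey(value string) string {
	kv := strings.SplitN(value, "=", 2)
	return kv[0]
}

func removeItems(seq []string, toRemove map[string]struct{}, keyFunc func(string) string) []string {
	newSeq := []string{}
	for _, item := range seq {
		if _, exists := toRemove[keyFunc(item)]; !exists {
			newSeq = append(newSeq, item)
		}
	}
	return newSeq
}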
// TODO: should this override by destination path, or does swarm handle that?
func mergeMounts(flags *pflag.FlagSet, mounts *[]swarm.Mount) {
	if !flags.Changed("mount") {
		return
	}

	values := flags.Lookup("mount").Value.(*MountOpt).Value()
	*mounts = append(*mounts, values...)
}
func anyChanged(flags *pflag.FlagSet, fields ...string) bool {
	for _, flag := range fields {
		if flags.Changed(flag) {
			return true
		}
	}
	return false
}
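// A hypothetical usage sketch for anyChanged: guard a group of related
// mutations behind one check so untouched sections of a spec stay nil,
// which the API treats as "no change". The flag names are illustrative.
func exampleGuard(flags *pflag.FlagSet, spec *swarm.ServiceSpec) {
	if !anyChanged(flags, "update-parallelism", "update-delay") {
		return
	}
	if spec.UpdateConfig == nil {
		spec.UpdateConfig = &swarm.UpdateConfig{}
	}
	// ... populate spec.UpdateConfig from the flags ...
}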
func mergeNetworks(flags *pflag.FlagSet, attachments *[]swarm.NetworkAttachmentConfig) {
	if !flags.Changed("network") {
		return
	}
	networks, _ := flags.GetStringSlice("network")
	for _, network := range networks {
		*attachments = append(*attachments, swarm.NetworkAttachmentConfig{Target: network})
	}
}
func getFromEnvIfNotSet(flags *pflag.FlagSet, flagName, envName, value string) string {
	if flags.Changed(flagName) {
		return value
	}

	if env := os.Getenv(envName); env != "" {
		return env
	}

	return value // not changed, so presumably the default
}
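// A hypothetical usage sketch: an explicit flag wins over the environment,
// and the environment wins over the compiled-in default. The flag name and
// environment variable here are illustrative.
func resolveRegistry(flags *pflag.FlagSet) string {
	registry, _ := flags.GetString("registry")
	return getFromEnvIfNotSet(flags, "registry", "EXAMPLE_REGISTRY", registry)
}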
func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) error {
	if !flags.Changed(flag) {
		return nil
	}

	value, _ := flags.GetString(flag)
	valueSlice, err := shlex.Split(value)
	*field = valueSlice
	return err
}
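// A hypothetical usage sketch: shlex.Split honors shell-style quoting, so a
// single flag value can carry a whole argv, including arguments with spaces.
// The flag set wiring and sample value are assumed.
func exampleSplit() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	flags.String("command", "", "command to run")
	_ = flags.Parse([]string{`--command=sh -c 'echo hello world'`})

	var command []string
	_ = updateStringToSlice(flags, "command", &command)
	// command == []string{"sh", "-c", "echo hello world"}
}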
func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error {
	if !flags.Changed(flagReplicas) {
		return nil
	}

	if serviceMode == nil || serviceMode.Replicated == nil {
		return fmt.Errorf("replicas can only be used with replicated mode")
	}
	serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value()
	return nil
}
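// Uint64Opt is referenced above but not defined in this snippet; a minimal
// pflag.Value sketch, assuming it wraps an optional uint64 so Value() can
// return nil when the flag was never set (matching Replicated.Replicas being
// a *uint64):
type Uint64Opt struct {
	value *uint64
}

// Set parses the flag string and records that a value was supplied.
func (i *Uint64Opt) Set(s string) error {
	v, err := strconv.ParseUint(s, 10, 64)
	i.value = &v
	return err
}

// Type is used by pflag in help output.
func (i *Uint64Opt) Type() string { return "uint" }

// String renders the current value, or "" when unset.
func (i *Uint64Opt) String() string {
	if i.value != nil {
		return strconv.FormatUint(*i.value, 10)
	}
	return ""
}

// Value returns nil when the flag was never set.
func (i *Uint64Opt) Value() *uint64 {
	return i.value
}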
func updateNetworks(flags *pflag.FlagSet, attachments *[]swarm.NetworkAttachmentConfig) {
	if !flags.Changed(flagNetwork) {
		return
	}
	networks, _ := flags.GetStringSlice(flagNetwork)

	var localAttachments []swarm.NetworkAttachmentConfig
	for _, network := range networks {
		localAttachments = append(localAttachments, swarm.NetworkAttachmentConfig{Target: network})
	}
	*attachments = localAttachments
}
// TODO: should this override by name, or does swarm handle that?
func mergePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) {
	if !flags.Changed("ports") {
		return
	}

	values := flags.Lookup("ports").Value.(*opts.ListOpts).GetAll()
	ports, portBindings, _ := nat.ParsePortSpecs(values)

	for port := range ports {
		*portConfig = append(*portConfig, convertPortToPortConfig(port, portBindings)...)
	}
}
func updateLabels(flags *pflag.FlagSet, field *map[string]string) {
	if !flags.Changed(flagLabel) {
		return
	}

	values := flags.Lookup(flagLabel).Value.(*opts.ListOpts).GetAll()

	localLabels := map[string]string{}
	for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
		localLabels[key] = value
	}
	*field = localLabels
}
func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} {
	var empty struct{}
	toRemove := make(map[string]struct{})

	if !flags.Changed(flag) {
		return toRemove
	}

	toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll()
	for _, key := range toRemoveSlice {
		toRemove[key] = empty
	}
	return toRemove
}
// TODO: should this override by name, or does swarm handle that?
func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) {
	if !flags.Changed(flagPublish) {
		return
	}

	values := flags.Lookup(flagPublish).Value.(*opts.ListOpts).GetAll()
	ports, portBindings, _ := nat.ParsePortSpecs(values)

	var localPortConfig []swarm.PortConfig
	for port := range ports {
		localPortConfig = append(localPortConfig, convertPortToPortConfig(port, portBindings)...)
	}
	*portConfig = localPortConfig
}
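// convertPortToPortConfig is called by the port helpers above but not shown;
// a minimal sketch, assuming one swarm.PortConfig per host binding and a
// best-effort strconv parse of the published port:
func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) []swarm.PortConfig {
	ports := []swarm.PortConfig{}

	for _, binding := range portBindings[port] {
		// Ignore the parse error: an empty or invalid host port simply
		// publishes on a swarm-assigned port (PublishedPort == 0).
		hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16)
		ports = append(ports, swarm.PortConfig{
			Protocol:      swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
			TargetPort:    uint32(port.Int()),
			PublishedPort: uint32(hostPort),
		})
	}
	return ports
}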
func mergeLabels(flags *pflag.FlagSet, field *map[string]string) {
	if !flags.Changed("label") {
		return
	}

	if *field == nil {
		*field = make(map[string]string)
	}

	values := flags.Lookup("label").Value.(*opts.ListOpts).GetAll()
	for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
		(*field)[key] = value
	}
}
func parseContainer(flags *pflag.FlagSet, spec *api.ServiceSpec) error {
	if flags.Changed("image") {
		image, err := flags.GetString("image")
		if err != nil {
			return err
		}
		spec.Task.GetContainer().Image = image
	}

	if flags.Changed("command") {
		command, err := flags.GetStringSlice("command")
		if err != nil {
			return err
		}
		spec.Task.GetContainer().Command = command
	}

	if flags.Changed("args") {
		args, err := flags.GetStringSlice("args")
		if err != nil {
			return err
		}
		spec.Task.GetContainer().Args = args
	}

	if flags.Changed("env") {
		env, err := flags.GetStringSlice("env")
		if err != nil {
			return err
		}
		spec.Task.GetContainer().Env = env
	}

	return nil
}
func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
	spec := &swarm.Spec

	if flags.Changed(flagTaskHistoryLimit) {
		spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64(flagTaskHistoryLimit)
	}

	if flags.Changed(flagDispatcherHeartbeat) {
		if v, err := flags.GetDuration(flagDispatcherHeartbeat); err == nil {
			spec.Dispatcher.HeartbeatPeriod = uint64(v.Nanoseconds())
		}
	}

	if flags.Changed(flagCertExpiry) {
		if v, err := flags.GetDuration(flagCertExpiry); err == nil {
			spec.CAConfig.NodeCertExpiry = v
		}
	}

	if flags.Changed(flagExternalCA) {
		value := flags.Lookup(flagExternalCA).Value.(*ExternalCAOption)
		spec.CAConfig.ExternalCAs = value.Value()
	}

	return nil
}
func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
	spec := &swarm.Spec

	if flags.Changed("auto-accept") {
		value := flags.Lookup("auto-accept").Value.(*AutoAcceptOption)
		if len(spec.AcceptancePolicy.Policies) > 0 {
			spec.AcceptancePolicy.Policies = value.Policies(spec.AcceptancePolicy.Policies[0].Secret)
		} else {
			spec.AcceptancePolicy.Policies = value.Policies("")
		}
	}

	if flags.Changed("secret") {
		secret, _ := flags.GetString("secret")
		// Iterate by index: ranging over the slice by value would mutate a
		// copy and leave the stored policies' secrets unchanged.
		for i := range spec.AcceptancePolicy.Policies {
			spec.AcceptancePolicy.Policies[i].Secret = secret
		}
	}

	if flags.Changed("task-history-limit") {
		spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64("task-history-limit")
	}

	if flags.Changed("dispatcher-heartbeat") {
		if v, err := flags.GetDuration("dispatcher-heartbeat"); err == nil {
			spec.Dispatcher.HeartbeatPeriod = uint64(v.Nanoseconds())
		}
	}

	return nil
}
func parsePlacement(flags *pflag.FlagSet, spec *api.ServiceSpec) error {
	if flags.Changed("constraint") {
		constraints, err := flags.GetStringSlice("constraint")
		if err != nil {
			return err
		}
		if spec.Task.Placement == nil {
			spec.Task.Placement = &api.Placement{}
		}
		spec.Task.Placement.Constraints = constraints
	}

	return nil
}
func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) {
	if flags.Changed(flagMountAdd) {
		values := flags.Lookup(flagMountAdd).Value.(*MountOpt).Value()
		*mounts = append(*mounts, values...)
	}
	toRemove := buildToRemoveSet(flags, flagMountRemove)

	newMounts := []mounttypes.Mount{}
	for _, mount := range *mounts {
		if _, exists := toRemove[mount.Target]; !exists {
			newMounts = append(newMounts, mount)
		}
	}
	*mounts = newMounts
}
func updateNetworks(flags *pflag.FlagSet, attachments *[]swarm.NetworkAttachmentConfig) {
	if flags.Changed(flagNetworkAdd) {
		networks, _ := flags.GetStringSlice(flagNetworkAdd)
		for _, network := range networks {
			*attachments = append(*attachments, swarm.NetworkAttachmentConfig{Target: network})
		}
	}
	toRemove := buildToRemoveSet(flags, flagNetworkRemove)

	newNetworks := []swarm.NetworkAttachmentConfig{}
	for _, network := range *attachments {
		if _, exists := toRemove[network.Target]; !exists {
			newNetworks = append(newNetworks, network)
		}
	}
	*attachments = newNetworks
}
func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error {
	return func(node *swarm.Node) error {
		spec := &node.Spec

		if flags.Changed(flagRole) {
			str, err := flags.GetString(flagRole)
			if err != nil {
				return err
			}
			spec.Role = swarm.NodeRole(str)
		}
		if flags.Changed(flagMembership) {
			str, err := flags.GetString(flagMembership)
			if err != nil {
				return err
			}
			spec.Membership = swarm.NodeMembership(str)
		}
		if flags.Changed(flagAvailability) {
			str, err := flags.GetString(flagAvailability)
			if err != nil {
				return err
			}
			spec.Availability = swarm.NodeAvailability(str)
		}
		if spec.Annotations.Labels == nil {
			spec.Annotations.Labels = make(map[string]string)
		}
		if flags.Changed(flagLabelAdd) {
			labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll()
			for k, v := range runconfigopts.ConvertKVStringsToMap(labels) {
				spec.Annotations.Labels[k] = v
			}
		}
		if flags.Changed(flagLabelRemove) {
			keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll()
			for _, k := range keys {
				// if a key doesn't exist, fail the command explicitly
				if _, exists := spec.Annotations.Labels[k]; !exists {
					return fmt.Errorf("key %s doesn't exist in node's labels", k)
				}
				delete(spec.Annotations.Labels, k)
			}
		}
		return nil
	}
}
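// A hypothetical usage sketch for the closure returned by mergeNodeUpdate:
// inspect the node, apply the flag-driven mutations, then push the spec back
// with the node's version for optimistic concurrency. The Docker API client
// calls are real, but the command wiring is assumed.
func runNodeUpdate(ctx context.Context, apiClient client.NodeAPIClient, flags *pflag.FlagSet, nodeID string) error {
	node, _, err := apiClient.NodeInspectWithRaw(ctx, nodeID)
	if err != nil {
		return err
	}
	if err := mergeNodeUpdate(flags)(&node); err != nil {
		return err
	}
	return apiClient.NodeUpdate(ctx, nodeID, node.Version, node.Spec)
}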
func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) {
	return func(node *swarm.Node) {
		mergeString := func(flag string, field *string) {
			if flags.Changed(flag) {
				*field, _ = flags.GetString(flag)
			}
		}

		mergeRole := func(flag string, field *swarm.NodeRole) {
			if flags.Changed(flag) {
				str, _ := flags.GetString(flag)
				*field = swarm.NodeRole(str)
			}
		}

		mergeMembership := func(flag string, field *swarm.NodeMembership) {
			if flags.Changed(flag) {
				str, _ := flags.GetString(flag)
				*field = swarm.NodeMembership(str)
			}
		}

		mergeAvailability := func(flag string, field *swarm.NodeAvailability) {
			if flags.Changed(flag) {
				str, _ := flags.GetString(flag)
				*field = swarm.NodeAvailability(str)
			}
		}

		mergeLabels := func(flag string, field *map[string]string) {
			if flags.Changed(flag) {
				values, _ := flags.GetStringSlice(flag)
				for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
					(*field)[key] = value
				}
			}
		}

		spec := &node.Spec
		mergeString("name", &spec.Name)
		// TODO: setting labels is not working
		mergeLabels("label", &spec.Labels)
		mergeRole("role", &spec.Role)
		mergeMembership("membership", &spec.Membership)
		mergeAvailability("availability", &spec.Availability)
	}
}
func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) error {
	client := dockerCli.Client()
	ctx := context.Background()

	req := swarm.InitRequest{
		ListenAddr:       opts.listenAddr.String(),
		AdvertiseAddr:    opts.advertiseAddr,
		ForceNewCluster:  opts.forceNewCluster,
		Spec:             opts.swarmOptions.ToSpec(flags),
		AutoLockManagers: opts.swarmOptions.autolock,
	}
	if flags.Changed(flagAvailability) {
		availability := swarm.NodeAvailability(strings.ToLower(opts.availability))
		switch availability {
		case swarm.NodeAvailabilityActive, swarm.NodeAvailabilityPause, swarm.NodeAvailabilityDrain:
			req.Availability = availability
		default:
			return fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
		}
	}

	nodeID, err := client.SwarmInit(ctx, req)
	if err != nil {
		if strings.Contains(err.Error(), "could not choose an IP address to advertise") ||
			strings.Contains(err.Error(), "could not find the system's IP address") {
			return errors.New(err.Error() + " - specify one with --advertise-addr")
		}
		return err
	}

	fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID)

	if err := printJoinCommand(ctx, dockerCli, nodeID, false, true); err != nil {
		return err
	}

	fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n")

	if req.AutoLockManagers {
		unlockKeyResp, err := client.SwarmGetUnlockKey(ctx)
		if err != nil {
			return errors.Wrap(err, "could not fetch unlock key")
		}
		printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey)
	}

	return nil
}
func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error {
	// The key of the map is `port/protocol`, e.g., `80/tcp`
	portSet := map[string]swarm.PortConfig{}

	// Build the current list of portConfig
	for _, entry := range *portConfig {
		if _, ok := portSet[portConfigToString(&entry)]; !ok {
			portSet[portConfigToString(&entry)] = entry
		}
	}

	newPorts := []swarm.PortConfig{}

	// Clean current ports
	toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value()
portLoop:
	for _, port := range portSet {
		for _, pConfig := range toRemove {
			if equalProtocol(port.Protocol, pConfig.Protocol) &&
				port.TargetPort == pConfig.TargetPort &&
				equalPublishMode(port.PublishMode, pConfig.PublishMode) {
				continue portLoop
			}
		}

		newPorts = append(newPorts, port)
	}

	// Check to see if there are any conflicts in flags.
	if flags.Changed(flagPublishAdd) {
		ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value()

		for _, port := range ports {
			if _, ok := portSet[portConfigToString(&port)]; ok {
				continue
			}
			//portSet[portConfigToString(&port)] = port
			newPorts = append(newPorts, port)
		}
	}

	// Sort the PortConfig to avoid unnecessary updates
	sort.Sort(byPortConfig(newPorts))
	*portConfig = newPorts
	return nil
}
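// The helpers above (portConfigToString, equalProtocol, equalPublishMode,
// byPortConfig) are not included in this snippet; minimal sketches, assuming
// an empty protocol defaults to tcp and an empty publish mode to ingress:
func portConfigToString(portConfig *swarm.PortConfig) string {
	return fmt.Sprintf("%v:%v/%s/%s",
		portConfig.PublishedPort, portConfig.TargetPort,
		portConfig.Protocol, portConfig.PublishMode)
}

func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool {
	return prot1 == prot2 ||
		(prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) ||
		(prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP)
}

func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool {
	return mode1 == mode2 ||
		(mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) ||
		(mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress)
}

// byPortConfig sorts on the same string key used for deduplication, which
// keeps the final list deterministic across runs.
type byPortConfig []swarm.PortConfig

func (r byPortConfig) Len() int      { return len(r) }
func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byPortConfig) Less(i, j int) bool {
	return portConfigToString(&r[i]) < portConfigToString(&r[j])
}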