func inspectAll(ctx context.Context, dockerCli *client.DockerCli, getSize bool) inspect.GetRefFunc {
	client := dockerCli.Client()
	return func(ref string) (interface{}, []byte, error) {
		c, rawContainer, err := client.ContainerInspectWithRaw(ctx, ref, getSize)
		if err == nil || !apiclient.IsErrNotFound(err) {
			return c, rawContainer, err
		}
		// Search for image with that id if a container doesn't exist.
		i, rawImage, err := client.ImageInspectWithRaw(ctx, ref)
		if err == nil || !apiclient.IsErrNotFound(err) {
			return i, rawImage, err
		}
		// Search for task with that id if an image doesn't exist.
		t, rawTask, err := client.TaskInspectWithRaw(ctx, ref)
		if err == nil || !(apiclient.IsErrNotFound(err) || isErrorNoSwarmMode(err)) {
			if getSize {
				fmt.Fprintln(dockerCli.Err(), "WARNING: --size ignored for tasks")
			}
			return t, rawTask, err
		}
		return nil, nil, fmt.Errorf("Error: No such container, image or task: %s", ref)
	}
}
// ForwardAllSignals forwards signals to the container
func ForwardAllSignals(ctx context.Context, cli *client.DockerCli, cid string) chan os.Signal {
	sigc := make(chan os.Signal, 128)
	signal.CatchAll(sigc)
	go func() {
		for s := range sigc {
			if s == signal.SIGCHLD || s == signal.SIGPIPE {
				continue
			}
			var sig string
			for sigStr, sigN := range signal.SignalMap {
				if sigN == s {
					sig = sigStr
					break
				}
			}
			if sig == "" {
				fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s)
				continue
			}
			if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil {
				logrus.Debugf("Error sending signal: %s", err)
			}
		}
	}()
	return sigc
}
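// Usage sketch (hedged): callers hand the returned channel to
// signal.StopCatch when they are done, as runAttach and runStart below do
// with the equivalent dockerCli.ForwardAllSignals method:
//
//	sigc := ForwardAllSignals(ctx, dockerCli, containerID)
//	defer signal.StopCatch(sigc)
//
// StopCatch stops signal delivery and closes the channel, which lets the
// forwarding goroutine's range loop terminate.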
func runLogs(dockerCli *client.DockerCli, opts *logsOptions) error {
	ctx := context.Background()

	c, err := dockerCli.Client().ContainerInspect(ctx, opts.container)
	if err != nil {
		return err
	}

	if !validDrivers[c.HostConfig.LogConfig.Type] {
		return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type)
	}

	options := types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Since:      opts.since,
		Timestamps: opts.timestamps,
		Follow:     opts.follow,
		Tail:       opts.tail,
		Details:    opts.details,
	}
	responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	if c.Config.Tty {
		_, err = io.Copy(dockerCli.Out(), responseBody)
	} else {
		_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
	}
	return err
}
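// The validDrivers set consulted above is not shown here; a minimal sketch
// consistent with the error message (only json-file and journald support
// reading logs back through the daemon):
var validDrivers = map[string]bool{
	"json-file": true,
	"journald":  true,
}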
func runConfig(dockerCli *client.DockerCli, opts configOptions) error {
	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
	if err != nil {
		return err
	}
	return bundlefile.Print(dockerCli.Out(), bundle)
}
func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
	ctx := context.Background()
	client := dockerCli.Client()

	var getRefFunc inspect.GetRefFunc
	switch opts.inspectType {
	case "container":
		getRefFunc = func(ref string) (interface{}, []byte, error) {
			return client.ContainerInspectWithRaw(ctx, ref, opts.size)
		}
	case "image":
		getRefFunc = func(ref string) (interface{}, []byte, error) {
			return client.ImageInspectWithRaw(ctx, ref)
		}
	case "task":
		if opts.size {
			fmt.Fprintln(dockerCli.Err(), "WARNING: --size ignored for tasks")
		}
		getRefFunc = func(ref string) (interface{}, []byte, error) {
			return client.TaskInspectWithRaw(ctx, ref)
		}
	case "":
		getRefFunc = inspectAll(ctx, dockerCli, opts.size)
	default:
		return fmt.Errorf("%q is not a valid value for --type", opts.inspectType)
	}
	return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, getRefFunc)
}
func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command {
	opts := createOptions{
		driverOpts: *opts.NewMapOpts(nil, nil),
	}

	cmd := &cobra.Command{
		Use:   "create [OPTIONS] [VOLUME]",
		Short: "Create a volume",
		Long:  createDescription,
		Args:  cli.RequiresMaxArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 1 {
				if opts.name != "" {
					fmt.Fprint(dockerCli.Err(), "Conflicting options: either specify --name or provide positional arg, not both\n")
					return cli.StatusError{StatusCode: 1}
				}
				opts.name = args[0]
			}
			return runCreate(dockerCli, opts)
		},
	}
	flags := cmd.Flags()
	flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name")
	flags.StringVar(&opts.name, "name", "", "Specify volume name")
	flags.Lookup("name").Hidden = true
	flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options")
	flags.StringSliceVar(&opts.labels, "label", []string{}, "Set metadata for a volume")

	return cmd
}
func runCreate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *runconfigopts.ContainerOptions) error {
	config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts)
	if err != nil {
		reportError(dockerCli.Err(), "create", err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}
	response, err := createContainer(context.Background(), dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name)
	if err != nil {
		return err
	}
	fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID)
	return nil
}
func runDeploy(dockerCli *client.DockerCli, opts deployOptions) error {
	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
	if err != nil {
		return err
	}

	networks := getUniqueNetworkNames(bundle.Services)
	ctx := context.Background()

	if err := updateNetworks(ctx, dockerCli, networks, opts.namespace); err != nil {
		return err
	}
	return deployServices(ctx, dockerCli, bundle.Services, opts.namespace)
}
func startContainersWithoutAttachments(dockerCli *client.DockerCli, ctx context.Context, containers []string) error {
	var failedContainers []string
	for _, container := range containers {
		if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil {
			fmt.Fprintf(dockerCli.Err(), "%s\n", err)
			failedContainers = append(failedContainers, container)
		} else {
			fmt.Fprintf(dockerCli.Out(), "%s\n", container)
		}
	}

	if len(failedContainers) > 0 {
		return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", "))
	}
	return nil
}
func runRename(dockerCli *client.DockerCli, opts *renameOptions) error {
	ctx := context.Background()

	oldName := strings.TrimSpace(opts.oldName)
	newName := strings.TrimSpace(opts.newName)

	if oldName == "" || newName == "" {
		return fmt.Errorf("Error: Neither old nor new names may be empty")
	}

	if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil {
		fmt.Fprintf(dockerCli.Err(), "%s\n", err)
		return fmt.Errorf("Error: failed to rename container named %s", oldName)
	}
	return nil
}
// NewVolumeCommand returns a cobra command for `volume` subcommands
func NewVolumeCommand(dockerCli *client.DockerCli) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "volume",
		Short: "Manage Docker volumes",
		Args:  cli.NoArgs,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
		},
	}
	cmd.AddCommand(
		newCreateCommand(dockerCli),
		newInspectCommand(dockerCli),
		newListCommand(dockerCli),
		newRemoveCommand(dockerCli),
	)
	return cmd
}
// NewStackCommand returns a cobra command for `stack` subcommands
func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "stack",
		Short: "Manage Docker stacks",
		Args:  cli.NoArgs,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
		},
	}
	cmd.AddCommand(
		newConfigCommand(dockerCli),
		newDeployCommand(dockerCli),
		newRemoveCommand(dockerCli),
		newTasksCommand(dockerCli),
	)
	return cmd
}
func runLogout(dockerCli *client.DockerCli, serverAddress string) error {
	ctx := context.Background()

	var isDefaultRegistry bool
	if serverAddress == "" {
		serverAddress = dockerCli.ElectAuthServer(ctx)
		isDefaultRegistry = true
	}

	var (
		loggedIn        bool
		regsToLogout    []string
		hostnameAddress = serverAddress
		regsToTry       = []string{serverAddress}
	)
	if !isDefaultRegistry {
		hostnameAddress = registry.ConvertToHostname(serverAddress)
		// the attempts below are kept for backward compatibility, since a user
		// could have saved the registry in any one of the following formats.
		regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress)
	}

	// Check whether we're logged in based on the records in the config file,
	// which may not contain a user/password pair when credentials live in a
	// credentials store.
	for _, s := range regsToTry {
		if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok {
			loggedIn = true
			regsToLogout = append(regsToLogout, s)
		}
	}

	if !loggedIn {
		fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress)
		return nil
	}

	fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress)
	for _, r := range regsToLogout {
		if err := client.EraseCredentials(dockerCli.ConfigFile(), r); err != nil {
			fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err)
		}
	}
	return nil
}
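// Illustration (hypothetical registry address): for
// `docker logout registry.example.com:5000`, the loop above checks the raw
// address plus its scheme-prefixed variants:
//
//	registry.example.com:5000
//	http://registry.example.com:5000
//	https://registry.example.com:5000
//
// so credentials saved by older clients under a scheme-prefixed key are
// still found and erased.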
// NewSwarmCommand returns a cobra command for `swarm` subcommands
func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "swarm",
		Short: "Manage Docker Swarm",
		Args:  cli.NoArgs,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
		},
	}
	cmd.AddCommand(
		newInitCommand(dockerCli),
		newJoinCommand(dockerCli),
		newUpdateCommand(dockerCli),
		newLeaveCommand(dockerCli),
		newInspectCommand(dockerCli),
	)
	return cmd
}
func runRemove(dockerCli *client.DockerCli, opts *removeOptions) error {
	client := dockerCli.Client()
	ctx := context.Background()
	status := 0

	for _, name := range opts.volumes {
		if err := client.VolumeRemove(ctx, name, opts.force); err != nil {
			fmt.Fprintf(dockerCli.Err(), "%s\n", err)
			status = 1
			continue
		}
		fmt.Fprintf(dockerCli.Out(), "%s\n", name)
	}

	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
func runRemove(dockerCli *client.DockerCli, networks []string) error {
	client := dockerCli.Client()
	ctx := context.Background()
	status := 0

	for _, name := range networks {
		if err := client.NetworkRemove(ctx, name); err != nil {
			fmt.Fprintf(dockerCli.Err(), "%s\n", err)
			status = 1
			continue
		}
		// Removed names go to stdout, matching the other remove commands;
		// only errors above go to stderr.
		fmt.Fprintf(dockerCli.Out(), "%s\n", name)
	}

	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
// NewNodeCommand returns a cobra command for `node` subcommands
func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "node",
		Short: "Manage Docker Swarm nodes",
		Args:  cli.NoArgs,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
		},
	}
	cmd.AddCommand(
		newDemoteCommand(dockerCli),
		newInspectCommand(dockerCli),
		newListCommand(dockerCli),
		newPromoteCommand(dockerCli),
		newRemoveCommand(dockerCli),
		newPsCommand(dockerCli),
		newUpdateCommand(dockerCli),
	)
	return cmd
}
func runRemove(dockerCli *client.DockerCli, opts removeOptions) error {
	namespace := opts.namespace
	client := dockerCli.Client()
	stderr := dockerCli.Err()
	ctx := context.Background()
	hasError := false

	services, err := getServices(ctx, client, namespace)
	if err != nil {
		return err
	}
	for _, service := range services {
		fmt.Fprintf(stderr, "Removing service %s\n", service.Spec.Name)
		if err := client.ServiceRemove(ctx, service.ID); err != nil {
			hasError = true
			fmt.Fprintf(stderr, "Failed to remove service %s: %s\n", service.ID, err)
		}
	}

	networks, err := getNetworks(ctx, client, namespace)
	if err != nil {
		return err
	}
	for _, network := range networks {
		fmt.Fprintf(stderr, "Removing network %s\n", network.Name)
		if err := client.NetworkRemove(ctx, network.ID); err != nil {
			hasError = true
			fmt.Fprintf(stderr, "Failed to remove network %s: %s\n", network.ID, err)
		}
	}

	if len(services) == 0 && len(networks) == 0 {
		fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace)
		return nil
	}

	if hasError {
		return fmt.Errorf("Failed to remove some resources")
	}
	return nil
}
func runLogout(dockerCli *client.DockerCli, serverAddress string) error {
	ctx := context.Background()
	if serverAddress == "" {
		serverAddress = dockerCli.ElectAuthServer(ctx)
	}

	// Check whether we're logged in based on the records in the config file,
	// which may not contain a user/password pair when credentials live in a
	// credentials store.
	if _, ok := dockerCli.ConfigFile().AuthConfigs[serverAddress]; !ok {
		fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", serverAddress)
		return nil
	}

	fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", serverAddress)
	if err := client.EraseCredentials(dockerCli.ConfigFile(), serverAddress); err != nil {
		fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err)
	}
	return nil
}
func runDeploy(dockerCli *client.DockerCli, opts deployOptions) error {
	bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
	if err != nil {
		return err
	}

	info, err := dockerCli.Client().Info(context.Background())
	if err != nil {
		return err
	}
	if !info.Swarm.ControlAvailable {
		return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
	}

	networks := getUniqueNetworkNames(bundle.Services)
	ctx := context.Background()

	if err := updateNetworks(ctx, dockerCli, networks, opts.namespace); err != nil {
		return err
	}
	return deployServices(ctx, dockerCli, bundle.Services, opts.namespace, opts.sendRegistryAuth)
}
func inspectAll(ctx context.Context, dockerCli *client.DockerCli, getSize bool, typeConstraint string) inspect.GetRefFunc {
	var inspectAutodetect = []struct {
		ObjectType      string
		IsSizeSupported bool
		ObjectInspector func(string) (interface{}, []byte, error)
	}{
		{"container", true, inspectContainers(ctx, dockerCli, getSize)},
		{"image", true, inspectImages(ctx, dockerCli)},
		{"network", false, inspectNetwork(ctx, dockerCli)},
		{"volume", false, inspectVolume(ctx, dockerCli)},
		{"service", false, inspectService(ctx, dockerCli)},
		{"task", false, inspectTasks(ctx, dockerCli)},
		{"node", false, inspectNode(ctx, dockerCli)},
	}

	isErrNotSwarmManager := func(err error) bool {
		return strings.Contains(err.Error(), "This node is not a swarm manager")
	}

	return func(ref string) (interface{}, []byte, error) {
		for _, inspectData := range inspectAutodetect {
			if typeConstraint != "" && inspectData.ObjectType != typeConstraint {
				continue
			}
			v, raw, err := inspectData.ObjectInspector(ref)
			if err != nil {
				if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSwarmManager(err)) {
					continue
				}
				return v, raw, err
			}
			// Only warn when --size was actually requested for an object
			// type that does not support it.
			if getSize && !inspectData.IsSizeSupported {
				fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.ObjectType)
			}
			return v, raw, err
		}
		return nil, nil, fmt.Errorf("Error: No such object: %s", ref)
	}
}
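// A minimal sketch of one of the ObjectInspector helpers referenced above.
// These helpers are defined elsewhere; their shape here is an assumption,
// modeled on the type-specific closures in runInspect. Only the container
// variant is shown:
func inspectContainers(ctx context.Context, dockerCli *client.DockerCli, getSize bool) func(string) (interface{}, []byte, error) {
	return func(ref string) (interface{}, []byte, error) {
		return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize)
	}
}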
func newDockerCommand(dockerCli *client.DockerCli) *cobra.Command {
	opts := cliflags.NewClientOptions()
	var flags *pflag.FlagSet

	cmd := &cobra.Command{
		Use:              "docker [OPTIONS] COMMAND [arg...]",
		Short:            "A self-sufficient runtime for containers.",
		SilenceUsage:     true,
		SilenceErrors:    true,
		TraverseChildren: true,
		Args:             noArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			if opts.Version {
				showVersion()
				return nil
			}
			fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
			return nil
		},
		PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
			// flags must be the top-level command flags, not cmd.Flags()
			opts.Common.SetDefaultOptions(flags)
			dockerPreRun(opts)
			return dockerCli.Initialize(opts)
		},
	}
	cli.SetupRootCommand(cmd)

	flags = cmd.Flags()
	flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit")
	flags.StringVar(&opts.ConfigDir, "config", cliconfig.ConfigDir(), "Location of client config files")
	opts.Common.InstallFlags(flags)

	cmd.SetOutput(dockerCli.Out())
	cmd.AddCommand(newDaemonCommand())
	command.AddCommands(cmd, dockerCli)

	return cmd
}
// NewPluginCommand registers the `plugin` subcommands with rootCmd
func NewPluginCommand(rootCmd *cobra.Command, dockerCli *client.DockerCli) {
	cmd := &cobra.Command{
		Use:   "plugin",
		Short: "Manage Docker plugins",
		Args:  cli.NoArgs,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString())
		},
	}

	cmd.AddCommand(
		newDisableCommand(dockerCli),
		newEnableCommand(dockerCli),
		newInspectCommand(dockerCli),
		newInstallCommand(dockerCli),
		newListCommand(dockerCli),
		newRemoveCommand(dockerCli),
		newSetCommand(dockerCli),
		newPushCommand(dockerCli),
	)
	rootCmd.AddCommand(cmd)
}
func runList(dockerCli *client.DockerCli, opts listOptions) error {
	client := dockerCli.Client()

	volFilterArgs := filters.NewArgs()
	for _, f := range opts.filter {
		var err error
		volFilterArgs, err = filters.ParseFlag(f, volFilterArgs)
		if err != nil {
			return err
		}
	}

	volumes, err := client.VolumeList(context.Background(), volFilterArgs)
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
	if !opts.quiet {
		for _, warn := range volumes.Warnings {
			fmt.Fprintln(dockerCli.Err(), warn)
		}
		fmt.Fprintf(w, "DRIVER \tVOLUME NAME")
		fmt.Fprintf(w, "\n")
	}

	sort.Sort(byVolumeName(volumes.Volumes))
	for _, vol := range volumes.Volumes {
		if opts.quiet {
			fmt.Fprintln(w, vol.Name)
			continue
		}
		fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name)
	}
	w.Flush()
	return nil
}
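// Illustration (hypothetical flag values): repeated --filter arguments are
// each parsed as a key=value pair and accumulated into the same Args set, so
// `docker volume ls --filter dangling=true --filter driver=local` is
// equivalent to:
//
//	args := filters.NewArgs()
//	args, _ = filters.ParseFlag("dangling=true", args)
//	args, _ = filters.ParseFlag("driver=local", args)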
func runAttach(dockerCli *client.DockerCli, opts *attachOptions) error {
	ctx := context.Background()

	c, err := dockerCli.Client().ContainerInspect(ctx, opts.container)
	if err != nil {
		return err
	}

	if !c.State.Running {
		return fmt.Errorf("You cannot attach to a stopped container, start it first")
	}

	if c.State.Paused {
		return fmt.Errorf("You cannot attach to a paused container, unpause it first")
	}

	if err := dockerCli.CheckTtyInput(!opts.noStdin, c.Config.Tty); err != nil {
		return err
	}

	if opts.detachKeys != "" {
		dockerCli.ConfigFile().DetachKeys = opts.detachKeys
	}

	options := types.ContainerAttachOptions{
		Stream:     true,
		Stdin:      !opts.noStdin && c.Config.OpenStdin,
		Stdout:     true,
		Stderr:     true,
		DetachKeys: dockerCli.ConfigFile().DetachKeys,
	}

	var in io.ReadCloser
	if options.Stdin {
		in = dockerCli.In()
	}

	if opts.proxy && !c.Config.Tty {
		sigc := dockerCli.ForwardAllSignals(ctx, opts.container)
		defer signal.StopCatch(sigc)
	}

	resp, errAttach := dockerCli.Client().ContainerAttach(ctx, opts.container, options)
	if errAttach != nil && errAttach != httputil.ErrPersistEOF {
		// ContainerAttach returning ErrPersistEOF (connection closed) means
		// the server hit an error and put it in the hijacked connection;
		// keep the error and read the detailed error message from the
		// hijacked connection later.
		return errAttach
	}
	defer resp.Close()

	if c.Config.Tty && dockerCli.IsTerminalOut() {
		height, width := dockerCli.GetTtySize()
		// To handle the case where a user repeatedly attaches/detaches without resizing their
		// terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially
		// resize it, then go back to normal. Without this, every attach after the first will
		// require the user to manually resize or hit enter.
		dockerCli.ResizeTtyTo(ctx, opts.container, height+1, width+1, false)

		// After the above resizing occurs, the call to MonitorTtySize below will handle resetting back
		// to the actual size.
		if err := dockerCli.MonitorTtySize(ctx, opts.container, false); err != nil {
			logrus.Debugf("Error monitoring TTY size: %s", err)
		}
	}

	if err := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil {
		return err
	}

	if errAttach != nil {
		return errAttach
	}

	_, status, err := getExitCode(dockerCli, ctx, opts.container)
	if err != nil {
		return err
	}
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
func prettyPrintInfo(dockerCli *client.DockerCli, info types.Info) error {
	fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers)
	fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning)
	fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused)
	fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped)
	fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver)
	if info.DriverStatus != nil {
		for _, pair := range info.DriverStatus {
			fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1])

			// print a warning if devicemapper is using a loopback file
			if pair[0] == "Data loop file" {
				fmt.Fprintln(dockerCli.Err(), " WARNING: Usage of loopback devices is strongly discouraged for production use. Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.")
			}
		}
	}
	if info.SystemStatus != nil {
		for _, pair := range info.SystemStatus {
			fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1])
		}
	}
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver)

	fmt.Fprintf(dockerCli.Out(), "Plugins: \n")
	fmt.Fprintf(dockerCli.Out(), " Volume:")
	fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " "))
	fmt.Fprintf(dockerCli.Out(), "\n")
	fmt.Fprintf(dockerCli.Out(), " Network:")
	fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " "))
	fmt.Fprintf(dockerCli.Out(), "\n")

	if len(info.Plugins.Authorization) != 0 {
		fmt.Fprintf(dockerCli.Out(), " Authorization:")
		fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " "))
		fmt.Fprintf(dockerCli.Out(), "\n")
	}

	fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState)
	if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive {
		fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID)
		if info.Swarm.Error != "" {
			fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error)
		}
		fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable)
		if info.Swarm.ControlAvailable {
			fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID)
			fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
			fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
			fmt.Fprintf(dockerCli.Out(), " Orchestration:\n")
			fmt.Fprintf(dockerCli.Out(), " Task History Retention Limit: %d\n", info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit)
			fmt.Fprintf(dockerCli.Out(), " Raft:\n")
			fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval)
			fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick)
			fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick)
			fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n")
			fmt.Fprintf(dockerCli.Out(), " Heartbeat Period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod)))
			fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n")
			fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry))
			if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 {
				fmt.Fprintf(dockerCli.Out(), " External CAs:\n")
				for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs {
					fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL)
				}
			}
		}
		fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr)
	}

	if len(info.Runtimes) > 0 {
		fmt.Fprintf(dockerCli.Out(), "Runtimes:")
		for name := range info.Runtimes {
			fmt.Fprintf(dockerCli.Out(), " %s", name)
		}
		fmt.Fprint(dockerCli.Out(), "\n")
		fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime)
	}

	fmt.Fprintf(dockerCli.Out(), "Security Options:")
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), " %s", strings.Join(info.SecurityOptions, " "))
	fmt.Fprintf(dockerCli.Out(), "\n")

	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture)
	fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU)
	fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal)))
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID)
	fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir)
	fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", utils.IsDebugEnabled())
	fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug)

	if info.Debug {
		fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd)
		fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines)
		fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime)
		fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener)
	}

	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy)

	if info.IndexServerAddress != "" {
		u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username
		if len(u) > 0 {
			fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u)
		}
		fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress)
	}

	// Only output these warnings if the server does not support these features
	if info.OSType != "windows" {
		if !info.MemoryLimit {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support")
		}
		if !info.SwapLimit {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support")
		}
		if !info.KernelMemory {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support")
		}
		if !info.OomKillDisable {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support")
		}
		if !info.CPUCfsQuota {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support")
		}
		if !info.CPUCfsPeriod {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support")
		}
		if !info.CPUShares {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support")
		}
		if !info.CPUSet {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support")
		}
		if !info.IPv4Forwarding {
			fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled")
		}
		if !info.BridgeNfIptables {
			fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled")
		}
		if !info.BridgeNfIP6tables {
			fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled")
		}
	}

	if info.Labels != nil {
		fmt.Fprintln(dockerCli.Out(), "Labels:")
		for _, attribute := range info.Labels {
			fmt.Fprintf(dockerCli.Out(), " %s\n", attribute)
		}
	}

	ioutils.FprintfIfTrue(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild)
	if info.ClusterStore != "" {
		fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore)
	}

	if info.ClusterAdvertise != "" {
		fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise)
	}

	if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) {
		fmt.Fprintln(dockerCli.Out(), "Insecure Registries:")
		for _, registry := range info.RegistryConfig.IndexConfigs {
			if !registry.Secure {
				fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name)
			}
		}

		for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs {
			mask, _ := registry.Mask.Size()
			fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask)
		}
	}

	if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 {
		fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:")
		for _, mirror := range info.RegistryConfig.Mirrors {
			fmt.Fprintf(dockerCli.Out(), " %s\n", mirror)
		}
	}

	fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n", info.LiveRestoreEnabled)

	return nil
}
func runInfo(dockerCli *client.DockerCli) error {
	info, err := dockerCli.Client().Info(context.Background())
	if err != nil {
		return err
	}

	fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers)
	fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning)
	fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused)
	fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped)
	fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver)
	if info.DriverStatus != nil {
		for _, pair := range info.DriverStatus {
			fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1])

			// print a warning if devicemapper is using a loopback file
			if pair[0] == "Data loop file" {
				fmt.Fprintln(dockerCli.Err(), " WARNING: Usage of loopback devices is strongly discouraged for production use. Either use `--storage-opt dm.thinpooldev` or use `--storage-opt dm.no_warn_on_loop_devices=true` to suppress this warning.")
			}
		}
	}
	if info.SystemStatus != nil {
		for _, pair := range info.SystemStatus {
			fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1])
		}
	}
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Execution Driver: %s\n", info.ExecutionDriver)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver)

	fmt.Fprintf(dockerCli.Out(), "Plugins: \n")
	fmt.Fprintf(dockerCli.Out(), " Volume:")
	fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " "))
	fmt.Fprintf(dockerCli.Out(), "\n")
	fmt.Fprintf(dockerCli.Out(), " Network:")
	fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " "))
	fmt.Fprintf(dockerCli.Out(), "\n")

	if len(info.Plugins.Authorization) != 0 {
		fmt.Fprintf(dockerCli.Out(), " Authorization:")
		fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " "))
		fmt.Fprintf(dockerCli.Out(), "\n")
	}

	fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState)
	if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive {
		fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID)
		if info.Swarm.Error != "" {
			fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error)
		}
		if info.Swarm.ControlAvailable {
			fmt.Fprintf(dockerCli.Out(), " IsManager: Yes\n")
			fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers)
			fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes)
			ioutils.FprintfIfNotEmpty(dockerCli.Out(), " CACertHash: %s\n", info.Swarm.CACertHash)
		} else {
			fmt.Fprintf(dockerCli.Out(), " IsManager: No\n")
		}
	}

	if len(info.Runtimes) > 0 {
		fmt.Fprintf(dockerCli.Out(), "Runtimes:")
		for name := range info.Runtimes {
			fmt.Fprintf(dockerCli.Out(), " %s", name)
		}
		fmt.Fprint(dockerCli.Out(), "\n")
		fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime)
	}

	fmt.Fprintf(dockerCli.Out(), "Security Options:")
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), " %s", strings.Join(info.SecurityOptions, " "))
	fmt.Fprintf(dockerCli.Out(), "\n")

	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture)
	fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU)
	fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal)))
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID)
	fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir)
	fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", utils.IsDebugEnabled())
	fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug)

	if info.Debug {
		fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd)
		fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines)
		fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime)
		fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener)
	}

	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy)
	ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy)

	if info.IndexServerAddress != "" {
		u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username
		if len(u) > 0 {
			fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u)
		}
		fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress)
	}

	// Only output these warnings if the server does not support these features
	if info.OSType != "windows" {
		if !info.MemoryLimit {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support")
		}
		if !info.SwapLimit {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support")
		}
		if !info.KernelMemory {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support")
		}
		if !info.OomKillDisable {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support")
		}
		if !info.CPUCfsQuota {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support")
		}
		if !info.CPUCfsPeriod {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support")
		}
		if !info.CPUShares {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support")
		}
		if !info.CPUSet {
			fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support")
		}
		if !info.IPv4Forwarding {
			fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled")
		}
		if !info.BridgeNfIptables {
			fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled")
		}
		if !info.BridgeNfIP6tables {
			fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled")
		}
	}

	if info.Labels != nil {
		fmt.Fprintln(dockerCli.Out(), "Labels:")
		for _, attribute := range info.Labels {
			fmt.Fprintf(dockerCli.Out(), " %s\n", attribute)
		}
	}

	ioutils.FprintfIfTrue(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild)
	if info.ClusterStore != "" {
		fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore)
	}

	if info.ClusterAdvertise != "" {
		fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise)
	}

	if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) {
		fmt.Fprintln(dockerCli.Out(), "Insecure Registries:")
		for _, registry := range info.RegistryConfig.IndexConfigs {
			if !registry.Secure {
				fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name)
			}
		}

		for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs {
			mask, _ := registry.Mask.Size()
			fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask)
		}
	}
	return nil
}
func runStart(dockerCli *client.DockerCli, opts *startOptions) error {
	ctx, cancelFun := context.WithCancel(context.Background())

	if opts.attach || opts.openStdin {
		// We're going to attach to a container.
		// 1. Ensure we only have one container.
		if len(opts.containers) > 1 {
			return fmt.Errorf("You cannot start and attach multiple containers at once.")
		}

		// 2. Attach to the container.
		container := opts.containers[0]
		c, err := dockerCli.Client().ContainerInspect(ctx, container)
		if err != nil {
			return err
		}

		// We always use c.ID instead of container to maintain consistency during `docker start`
		if !c.Config.Tty {
			sigc := dockerCli.ForwardAllSignals(ctx, c.ID)
			defer signal.StopCatch(sigc)
		}

		if opts.detachKeys != "" {
			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
		}

		options := types.ContainerAttachOptions{
			Stream:     true,
			Stdin:      opts.openStdin && c.Config.OpenStdin,
			Stdout:     true,
			Stderr:     true,
			DetachKeys: dockerCli.ConfigFile().DetachKeys,
		}

		var in io.ReadCloser
		if options.Stdin {
			in = dockerCli.In()
		}

		resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options)
		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
			// ContainerAttach returning ErrPersistEOF (connection closed)
			// means the server hit an error and put it in the hijacked
			// connection; keep the error and read the detailed error message
			// from the hijacked connection later.
			return errAttach
		}
		defer resp.Close()

		cErr := promise.Go(func() error {
			errHijack := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp)
			if errHijack == nil {
				return errAttach
			}
			return errHijack
		})

		// 3. Start the container.
		if err := dockerCli.Client().ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil {
			cancelFun()
			<-cErr
			return err
		}

		// 4. Wait for attachment to break.
		if c.Config.Tty && dockerCli.IsTerminalOut() {
			if err := dockerCli.MonitorTtySize(ctx, c.ID, false); err != nil {
				fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err)
			}
		}
		if attachErr := <-cErr; attachErr != nil {
			return attachErr
		}
		_, status, err := getExitCode(dockerCli, ctx, c.ID)
		if err != nil {
			return err
		}
		if status != 0 {
			return cli.StatusError{StatusCode: status}
		}
	} else {
		// We're not going to attach to anything.
		// Start as many containers as we want.
		return startContainersWithoutAttachments(dockerCli, ctx, opts.containers)
	}

	return nil
}
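// For reference, promise.Go (docker/pkg/promise) used above is a small
// helper that runs f in a goroutine and exposes its eventual error on a
// buffered channel; a sketch of its shape:
//
//	func Go(f func() error) chan error {
//		ch := make(chan error, 1)
//		go func() {
//			ch <- f()
//		}()
//		return ch
//	}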
func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error {
	stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In()
	client := dockerCli.Client()
	// TODO: pass this as an argument
	cmdPath := "run"

	var (
		flAttach                              *opttypes.ListOpts
		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
	)

	config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts)
	// just in case Parse does not exit
	if err != nil {
		reportError(stderr, cmdPath, err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}

	if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 {
		fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n")
	}

	if len(hostConfig.DNS) > 0 {
		// check the DNS settings passed via --dns against
		// localhost regexp to warn if they are trying to
		// set a DNS to a localhost address
		for _, dnsIP := range hostConfig.DNS {
			if dns.IsLocalhost(dnsIP) {
				fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
				break
			}
		}
	}

	config.ArgsEscaped = false

	if !opts.detach {
		if err := dockerCli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
			return err
		}
	} else {
		if fl := flags.Lookup("attach"); fl != nil {
			flAttach = fl.Value.(*opttypes.ListOpts)
			if flAttach.Len() != 0 {
				return ErrConflictAttachDetach
			}
		}
		if opts.autoRemove {
			return ErrConflictDetachAutoRemove
		}

		config.AttachStdin = false
		config.AttachStdout = false
		config.AttachStderr = false
		config.StdinOnce = false
	}

	// Disable sigProxy when in TTY mode
	if config.Tty {
		opts.sigProxy = false
	}

	// Telling the Windows daemon the initial size of the tty during start makes
	// a far better user experience rather than relying on subsequent resizes
	// to cause things to catch up.
	if runtime.GOOS == "windows" {
		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.GetTtySize()
	}

	ctx, cancelFun := context.WithCancel(context.Background())

	createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name)
	if err != nil {
		reportError(stderr, cmdPath, err.Error(), true)
		return runStartContainerErr(err)
	}
	if opts.sigProxy {
		sigc := dockerCli.ForwardAllSignals(ctx, createResponse.ID)
		defer signal.StopCatch(sigc)
	}
	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			fmt.Fprintf(stdout, "%s\n", createResponse.ID)
		}()
	}
	if opts.autoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
		return ErrConflictRestartPolicyAndAutoRemove
	}
	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
	if attach {
		var (
			out, cerr io.Writer
			in        io.ReadCloser
		)
		if config.AttachStdin {
			in = stdin
		}
		if config.AttachStdout {
			out = stdout
		}
		if config.AttachStderr {
			if config.Tty {
				cerr = stdout
			} else {
				cerr = stderr
			}
		}

		if opts.detachKeys != "" {
			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
		}

		options := types.ContainerAttachOptions{
			Stream:     true,
			Stdin:      config.AttachStdin,
			Stdout:     config.AttachStdout,
			Stderr:     config.AttachStderr,
			DetachKeys: dockerCli.ConfigFile().DetachKeys,
		}

		resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options)
		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
			// ContainerAttach returning ErrPersistEOF (connection closed)
			// means the server hit an error and put it in the hijacked
			// connection; keep the error and read the detailed error message
			// from the hijacked connection later.
			return errAttach
		}
		defer resp.Close()

		errCh = promise.Go(func() error {
			errHijack := dockerCli.HoldHijackedConnection(ctx, config.Tty, in, out, cerr, resp)
			if errHijack == nil {
				return errAttach
			}
			return errHijack
		})
	}

	if opts.autoRemove {
		defer func() {
			// Explicitly not sharing the context as it could be "Done" (by calling cancelFun)
			// and thus the container would not be removed.
			if err := removeContainer(dockerCli, context.Background(), createResponse.ID, true, false, true); err != nil {
				fmt.Fprintf(stderr, "%v\n", err)
			}
		}()
	}

	// start the container
	if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
		// If we are holding a hijacked connection, notify it that we are
		// about to exit and wait for it, so the terminal state is restored.
		if attach {
			cancelFun()
			<-errCh
		}

		reportError(stderr, cmdPath, err.Error(), false)
		return runStartContainerErr(err)
	}

	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.IsTerminalOut() {
		if err := dockerCli.MonitorTtySize(ctx, createResponse.ID, false); err != nil {
			fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err)
		}
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}

	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return nil
	}

	var status int

	// Attached mode
	if opts.autoRemove {
		// Autoremove: wait for the container to finish, retrieve
		// the exit code and remove the container
		if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
			return runStartContainerErr(err)
		}
		if _, status, err = dockerCli.GetExitCode(ctx, createResponse.ID); err != nil {
			return err
		}
	} else {
		// No Autoremove: Simply retrieve the exit code
		if !config.Tty {
			// In non-TTY mode, we can't detach, so we must wait for container exit
			if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
				return err
			}
		} else {
			// In TTY mode, there is a race: if the process dies too slowly, the state could
			// be updated after the getExitCode call and result in the wrong exit code being reported
			if _, status, err = dockerCli.GetExitCode(ctx, createResponse.ID); err != nil {
				return err
			}
		}
	}

	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
func runBuild(dockerCli *client.DockerCli, options buildOptions) error {
	var (
		buildCtx io.ReadCloser
		err      error
	)

	specifiedContext := options.context

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = dockerCli.Out()
	buildBuff = dockerCli.Out()
	if options.quiet {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName)
	case urlutil.IsGitURL(specifiedContext):
		tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName)
	case urlutil.IsURL(specifiedContext):
		buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
	default:
		contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
	}

	if err != nil {
		if options.quiet && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(dockerCli.Err(), progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	if buildCtx == nil {
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
		if err != nil {
			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
		}

		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		var excludes []string
		if err == nil {
			excludes, err = dockerignore.ReadAll(f)
			if err != nil {
				return err
			}
		}

		if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil {
			return fmt.Errorf("Error checking context: '%s'.", err)
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile. Ignore errors here, as they will have been
		// caught by ValidateContextDirectory above.
		var includes = []string{"."}
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", relDockerfile)
		}

		buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		})
		if err != nil {
			return err
		}
	}

	ctx := context.Background()

	var resolvedTags []*resolvedTag
	if client.IsTrusted() {
		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, dockerCli.TrustedReference, &resolvedTags)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)

	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if options.memory != "" {
		parsedMemory, err := units.RAMInBytes(options.memory)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if options.memorySwap != "" {
		if options.memorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(options.memorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if options.shmSize != "" {
		shmSize, err = units.RAMInBytes(options.shmSize)
		if err != nil {
			return err
		}
	}

	buildOptions := types.ImageBuildOptions{
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           options.tags.GetAll(),
		SuppressOutput: options.quiet,
		NoCache:        options.noCache,
		Remove:         options.rm,
		ForceRemove:    options.forceRm,
		PullParent:     options.pull,
		Isolation:      container.Isolation(options.isolation),
		CPUSetCPUs:     options.cpuSetCpus,
		CPUSetMems:     options.cpuSetMems,
		CPUShares:      options.cpuShares,
		CPUQuota:       options.cpuQuota,
		CPUPeriod:      options.cpuPeriod,
		CgroupParent:   options.cgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        options.ulimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()),
		AuthConfigs:    dockerCli.RetrieveAuthConfigs(),
		Labels:         runconfigopts.ConvertKVStringsToMap(options.labels),
	}

	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if options.quiet {
				fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
			}
			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
		fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if options.quiet {
		fmt.Fprintf(dockerCli.Out(), "%s", buildBuff)
	}

	if client.IsTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := dockerCli.TagTrusted(ctx, resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}