// runLoad loads an image or repository into the daemon from a tar archive,
// read either from a file (--input) or from STDIN.
func runLoad(dockerCli *client.DockerCli, opts loadOptions) error {
	var input io.Reader = dockerCli.In()
	if opts.input != "" {
		file, err := os.Open(opts.input)
		if err != nil {
			return err
		}
		defer file.Close()
		input = file
	}

	if !dockerCli.Out().IsTerminal() {
		opts.quiet = true
	}

	response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	if response.Body != nil && response.JSON {
		return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil)
	}

	_, err = io.Copy(dockerCli.Out(), response.Body)
	return err
}
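
// acceptPrivileges returns a callback that prints the privileges requested by
// the plugin named name and asks the user on STDIN whether to grant them.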
func acceptPrivileges(dockerCli *client.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) {
	return func(privileges types.PluginPrivileges) (bool, error) {
		fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name)
		for _, privilege := range privileges {
			fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value)
		}

		fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? [y/N] ")
		reader := bufio.NewReader(dockerCli.In())
		line, _, err := reader.ReadLine()
		if err != nil {
			return false, err
		}
		return strings.ToLower(string(line)) == "y", nil
	}
}
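
// runImport creates a new filesystem image from the contents of a tarball,
// read from STDIN ("-"), a local file, or a URL.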
func runImport(dockerCli *client.DockerCli, opts importOptions) error {
	var (
		in      io.Reader
		srcName = opts.source
	)

	if opts.source == "-" {
		in = dockerCli.In()
	} else if !urlutil.IsURL(opts.source) {
		srcName = "-"
		file, err := os.Open(opts.source)
		if err != nil {
			return err
		}
		defer file.Close()
		in = file
	}

	source := types.ImageImportSource{
		Source:     in,
		SourceName: srcName,
	}

	options := types.ImageImportOptions{
		Message: opts.message,
		Changes: opts.changes,
	}

	clnt := dockerCli.Client()

	responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	return jsonmessage.DisplayJSONMessagesStream(responseBody, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil)
}
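
// runAttach attaches the local standard streams to a running container and
// propagates the container's exit code.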
func runAttach(dockerCli *client.DockerCli, opts *attachOptions) error {
	ctx := context.Background()

	c, err := dockerCli.Client().ContainerInspect(ctx, opts.container)
	if err != nil {
		return err
	}

	if !c.State.Running {
		return fmt.Errorf("You cannot attach to a stopped container, start it first")
	}

	if c.State.Paused {
		return fmt.Errorf("You cannot attach to a paused container, unpause it first")
	}

	if err := dockerCli.CheckTtyInput(!opts.noStdin, c.Config.Tty); err != nil {
		return err
	}

	if opts.detachKeys != "" {
		dockerCli.ConfigFile().DetachKeys = opts.detachKeys
	}

	options := types.ContainerAttachOptions{
		Stream:     true,
		Stdin:      !opts.noStdin && c.Config.OpenStdin,
		Stdout:     true,
		Stderr:     true,
		DetachKeys: dockerCli.ConfigFile().DetachKeys,
	}

	var in io.ReadCloser
	if options.Stdin {
		in = dockerCli.In()
	}

	if opts.proxy && !c.Config.Tty {
		sigc := dockerCli.ForwardAllSignals(ctx, opts.container)
		defer signal.StopCatch(sigc)
	}

	resp, errAttach := dockerCli.Client().ContainerAttach(ctx, opts.container, options)
	if errAttach != nil && errAttach != httputil.ErrPersistEOF {
		// ContainerAttach returning ErrPersistEOF (connection closed) means the server
		// hit an error and put it in the hijacked connection; keep the error and read
		// the detailed error message from the hijacked connection later.
		return errAttach
	}
	defer resp.Close()

	if c.Config.Tty && dockerCli.IsTerminalOut() {
		height, width := dockerCli.GetTtySize()
		// To handle the case where a user repeatedly attaches/detaches without resizing their
		// terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially
		// resize it, then go back to normal. Without this, every attach after the first will
		// require the user to manually resize or hit enter.
		dockerCli.ResizeTtyTo(ctx, opts.container, height+1, width+1, false)

		// After the above resizing occurs, the call to MonitorTtySize below will handle resetting back
		// to the actual size.
		if err := dockerCli.MonitorTtySize(ctx, opts.container, false); err != nil {
			logrus.Debugf("Error monitoring TTY size: %s", err)
		}
	}

	if err := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil {
		return err
	}

	if errAttach != nil {
		return errAttach
	}

	_, status, err := getExitCode(dockerCli, ctx, opts.container)
	if err != nil {
		return err
	}
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}

	return nil
}
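
// runInstall pulls and installs a plugin from a registry, resolving and
// encoding the registry auth for the plugin's repository.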
func runInstall(dockerCli *client.DockerCli, options pluginOptions) error {
	named, err := reference.ParseNamed(options.name) // FIXME: validate
	if err != nil {
		return err
	}
	named = reference.WithDefaultTag(named)
	ref, ok := named.(reference.NamedTagged)
	if !ok {
		return fmt.Errorf("invalid name: %s", named.String())
	}

	ctx := context.Background()

	repoInfo, err := registry.ParseRepositoryInfo(named)
	if err != nil {
		return err
	}

	authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index)

	encodedAuth, err := client.EncodeAuthToBase64(authConfig)
	if err != nil {
		return err
	}

	// TODO: pass noEnable flag
	return dockerCli.Client().PluginInstall(ctx, ref.String(), encodedAuth, options.grantPerms, false, dockerCli.In(), dockerCli.Out())
}
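
// runStart starts one or more stopped containers. When attach or open-stdin is
// requested it attaches to a single container, waits for it to exit, and
// propagates its exit code.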
func runStart(dockerCli *client.DockerCli, opts *startOptions) error {
	ctx, cancelFun := context.WithCancel(context.Background())

	if opts.attach || opts.openStdin {
		// We're going to attach to a container.
		// 1. Ensure we only have one container.
		if len(opts.containers) > 1 {
			return fmt.Errorf("You cannot start and attach multiple containers at once.")
		}

		// 2. Attach to the container.
		container := opts.containers[0]
		c, err := dockerCli.Client().ContainerInspect(ctx, container)
		if err != nil {
			return err
		}

		// We always use c.ID instead of container to maintain consistency during `docker start`
		if !c.Config.Tty {
			sigc := dockerCli.ForwardAllSignals(ctx, c.ID)
			defer signal.StopCatch(sigc)
		}

		if opts.detachKeys != "" {
			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
		}

		options := types.ContainerAttachOptions{
			Stream:     true,
			Stdin:      opts.openStdin && c.Config.OpenStdin,
			Stdout:     true,
			Stderr:     true,
			DetachKeys: dockerCli.ConfigFile().DetachKeys,
		}

		var in io.ReadCloser
		if options.Stdin {
			in = dockerCli.In()
		}

		resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options)
		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
			// ContainerAttach returning ErrPersistEOF (connection closed) means the server
			// hit an error and put it in the hijacked connection; keep the error and read
			// the detailed error message from the hijacked connection later.
			return errAttach
		}
		defer resp.Close()

		cErr := promise.Go(func() error {
			errHijack := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp)
			if errHijack == nil {
				return errAttach
			}
			return errHijack
		})

		// 3. Start the container.
		if err := dockerCli.Client().ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil {
			cancelFun()
			<-cErr
			return err
		}

		// 4. Wait for attachment to break.
		if c.Config.Tty && dockerCli.IsTerminalOut() {
			if err := dockerCli.MonitorTtySize(ctx, c.ID, false); err != nil {
				fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err)
			}
		}
		if attachErr := <-cErr; attachErr != nil {
			return attachErr
		}
		_, status, err := getExitCode(dockerCli, ctx, c.ID)
		if err != nil {
			return err
		}
		if status != 0 {
			return cli.StatusError{StatusCode: status}
		}
	} else {
		// We're not going to attach to anything.
		// Start as many containers as we want.
		return startContainersWithoutAttachments(dockerCli, ctx, opts.containers)
	}

	return nil
}
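
// runBuild sends a build context (local directory, git repository, URL, or a
// tar archive on STDIN) to the daemon and streams the build output back to
// the client.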
func runBuild(dockerCli *client.DockerCli, options buildOptions) error {

	var (
		buildCtx io.ReadCloser
		err      error
	)

	specifiedContext := options.context

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = dockerCli.Out()
	buildBuff = dockerCli.Out()
	if options.quiet {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName)
	case urlutil.IsGitURL(specifiedContext):
		tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName)
	case urlutil.IsURL(specifiedContext):
		buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
	default:
		contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
	}

	if err != nil {
		if options.quiet && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(dockerCli.Err(), progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	if buildCtx == nil {
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
		if err != nil {
			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
		}

		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		var excludes []string
		if err == nil {
			excludes, err = dockerignore.ReadAll(f)
			if err != nil {
				return err
			}
		}

		if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil {
			return fmt.Errorf("Error checking context: '%s'.", err)
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile. Ignore errors here, as they will have been
		// caught by validateContextDirectory above.
		var includes = []string{"."}
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", relDockerfile)
		}

		buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		})
		if err != nil {
			return err
		}
	}

	ctx := context.Background()

	var resolvedTags []*resolvedTag
	if client.IsTrusted() {
		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, dockerCli.TrustedReference, &resolvedTags)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)

	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if options.memory != "" {
		parsedMemory, err := units.RAMInBytes(options.memory)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if options.memorySwap != "" {
		if options.memorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(options.memorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if options.shmSize != "" {
		shmSize, err = units.RAMInBytes(options.shmSize)
		if err != nil {
			return err
		}
	}

	buildOptions := types.ImageBuildOptions{
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           options.tags.GetAll(),
		SuppressOutput: options.quiet,
		NoCache:        options.noCache,
		Remove:         options.rm,
		ForceRemove:    options.forceRm,
		PullParent:     options.pull,
		Isolation:      container.Isolation(options.isolation),
		CPUSetCPUs:     options.cpuSetCpus,
		CPUSetMems:     options.cpuSetMems,
		CPUShares:      options.cpuShares,
		CPUQuota:       options.cpuQuota,
		CPUPeriod:      options.cpuPeriod,
		CgroupParent:   options.cgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        options.ulimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()),
		AuthConfigs:    dockerCli.RetrieveAuthConfigs(),
		Labels:         runconfigopts.ConvertKVStringsToMap(options.labels),
	}

	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if options.quiet {
				fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
			}
			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
		// Not a JSONError: don't silently swallow the stream error.
		return err
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
		fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if options.quiet {
		fmt.Fprintf(dockerCli.Out(), "%s", buildBuff)
	}

	if client.IsTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := dockerCli.TagTrusted(ctx, resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
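
// runRun creates and starts a container from the parsed flags, optionally
// attaching the local standard streams and proxying signals, and returns the
// container's exit status.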
func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error {
	stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In()
	client := dockerCli.Client()
	// TODO: pass this as an argument
	cmdPath := "run"

	var (
		flAttach                              *opttypes.ListOpts
		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
	)

	config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts)

	// just in case the Parse does not exit
	if err != nil {
		reportError(stderr, cmdPath, err.Error(), true)
		return cli.StatusError{StatusCode: 125}
	}

	if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 {
		fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n")
	}

	if len(hostConfig.DNS) > 0 {
		// check the DNS settings passed via --dns against
		// localhost regexp to warn if they are trying to
		// set a DNS to a localhost address
		for _, dnsIP := range hostConfig.DNS {
			if dns.IsLocalhost(dnsIP) {
				fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
				break
			}
		}
	}

	config.ArgsEscaped = false

	if !opts.detach {
		if err := dockerCli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
			return err
		}
	} else {
		if fl := flags.Lookup("attach"); fl != nil {
			flAttach = fl.Value.(*opttypes.ListOpts)
			if flAttach.Len() != 0 {
				return ErrConflictAttachDetach
			}
		}
		if opts.autoRemove {
			return ErrConflictDetachAutoRemove
		}

		config.AttachStdin = false
		config.AttachStdout = false
		config.AttachStderr = false
		config.StdinOnce = false
	}

	// Disable sigProxy when in TTY mode
	if config.Tty {
		opts.sigProxy = false
	}

	// Telling the Windows daemon the initial size of the tty during start makes
	// a far better user experience rather than relying on subsequent resizes
	// to cause things to catch up.
	if runtime.GOOS == "windows" {
		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.GetTtySize()
	}

	ctx, cancelFun := context.WithCancel(context.Background())

	createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name)
	if err != nil {
		reportError(stderr, cmdPath, err.Error(), true)
		return runStartContainerErr(err)
	}
	if opts.sigProxy {
		sigc := dockerCli.ForwardAllSignals(ctx, createResponse.ID)
		defer signal.StopCatch(sigc)
	}

	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			fmt.Fprintf(stdout, "%s\n", createResponse.ID)
		}()
	}
	if opts.autoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
		return ErrConflictRestartPolicyAndAutoRemove
	}

	attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
	if attach {
		var (
			out, cerr io.Writer
			in        io.ReadCloser
		)
		if config.AttachStdin {
			in = stdin
		}
		if config.AttachStdout {
			out = stdout
		}
		if config.AttachStderr {
			if config.Tty {
				cerr = stdout
			} else {
				cerr = stderr
			}
		}

		if opts.detachKeys != "" {
			dockerCli.ConfigFile().DetachKeys = opts.detachKeys
		}

		options := types.ContainerAttachOptions{
			Stream:     true,
			Stdin:      config.AttachStdin,
			Stdout:     config.AttachStdout,
			Stderr:     config.AttachStderr,
			DetachKeys: dockerCli.ConfigFile().DetachKeys,
		}

		resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options)
		if errAttach != nil && errAttach != httputil.ErrPersistEOF {
			// ContainerAttach returning ErrPersistEOF (connection closed) means the server
			// hit an error and put it in the hijacked connection; keep the error and read
			// the detailed error message from the hijacked connection later.
			return errAttach
		}
		defer resp.Close()

		errCh = promise.Go(func() error {
			errHijack := dockerCli.HoldHijackedConnection(ctx, config.Tty, in, out, cerr, resp)
			if errHijack == nil {
				return errAttach
			}
			return errHijack
		})
	}

	if opts.autoRemove {
		defer func() {
			// Explicitly not sharing the context as it could be "Done" (by calling cancelFun)
			// and thus the container would not be removed.
			if err := removeContainer(dockerCli, context.Background(), createResponse.ID, true, false, true); err != nil {
				fmt.Fprintf(stderr, "%v\n", err)
			}
		}()
	}

	// start the container
	if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
		// If we hold a hijacked connection, notify HoldHijackedConnection that we are
		// going to exit and wait for it, so the terminal state is restored before returning.
		if attach {
			cancelFun()
			<-errCh
		}

		reportError(stderr, cmdPath, err.Error(), false)
		return runStartContainerErr(err)
	}

	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.IsTerminalOut() {
		if err := dockerCli.MonitorTtySize(ctx, createResponse.ID, false); err != nil {
			fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err)
		}
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}

	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return nil
	}

	var status int

	// Attached mode
	if opts.autoRemove {
		// Autoremove: wait for the container to finish, retrieve
		// the exit code and remove the container
		if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
			return runStartContainerErr(err)
		}
		if _, status, err = dockerCli.GetExitCode(ctx, createResponse.ID); err != nil {
			return err
		}
	} else {
		// No Autoremove: Simply retrieve the exit code
		if !config.Tty {
			// In non-TTY mode, we can't detach, so we must wait for container exit
			if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil {
				return err
			}
		} else {
			// In TTY mode, there is a race: if the process dies too slowly, the state could
			// be updated after the getExitCode call and result in the wrong exit code being reported
			if _, status, err = dockerCli.GetExitCode(ctx, createResponse.ID); err != nil {
				return err
			}
		}
	}

	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
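
// runExec creates and starts an exec instance in a running container, either
// detached or attached to the local standard streams, and propagates the exec
// process's exit code.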
func runExec(dockerCli *client.DockerCli, opts *execOptions, container string, execCmd []string) error {
	execConfig, err := parseExec(opts, container, execCmd)
	// just in case the ParseExec does not exit
	if container == "" || err != nil {
		return cli.StatusError{StatusCode: 1}
	}

	if opts.detachKeys != "" {
		dockerCli.ConfigFile().DetachKeys = opts.detachKeys
	}

	// Send client escape keys
	execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys

	ctx := context.Background()

	response, err := dockerCli.Client().ContainerExecCreate(ctx, container, *execConfig)
	if err != nil {
		return err
	}

	execID := response.ID
	if execID == "" {
		fmt.Fprintf(dockerCli.Out(), "exec ID empty")
		return nil
	}

	// Temp struct for execStart so that we don't need to transfer all the execConfig
	if !execConfig.Detach {
		if err := dockerCli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil {
			return err
		}
	} else {
		execStartCheck := types.ExecStartCheck{
			Detach: execConfig.Detach,
			Tty:    execConfig.Tty,
		}

		if err := dockerCli.Client().ContainerExecStart(ctx, execID, execStartCheck); err != nil {
			return err
		}
		// For now don't print this - wait for when we support exec wait()
		// fmt.Fprintf(dockerCli.Out(), "%s\n", execID)
		return nil
	}

	// Interactive exec requested.
	var (
		out, stderr io.Writer
		in          io.ReadCloser
		errCh       chan error
	)

	if execConfig.AttachStdin {
		in = dockerCli.In()
	}
	if execConfig.AttachStdout {
		out = dockerCli.Out()
	}
	if execConfig.AttachStderr {
		if execConfig.Tty {
			stderr = dockerCli.Out()
		} else {
			stderr = dockerCli.Err()
		}
	}

	resp, err := dockerCli.Client().ContainerExecAttach(ctx, execID, *execConfig)
	if err != nil {
		return err
	}
	defer resp.Close()

	errCh = promise.Go(func() error {
		return dockerCli.HoldHijackedConnection(ctx, execConfig.Tty, in, out, stderr, resp)
	})

	if execConfig.Tty && dockerCli.IsTerminalIn() {
		if err := dockerCli.MonitorTtySize(ctx, execID, true); err != nil {
			fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err)
		}
	}

	if err := <-errCh; err != nil {
		logrus.Debugf("Error hijack: %s", err)
		return err
	}

	var status int
	if _, status, err = dockerCli.GetExecExitCode(ctx, execID); err != nil {
		return err
	}

	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}

	return nil
}