// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
// irrespective of user input.
// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context builder.ModifiableContext, dockerfileName string, err error) {
	switch {
	case remoteURL == "":
		context, err = builder.MakeTarSumContext(r)
	case urlutil.IsGitURL(remoteURL):
		context, err = builder.MakeGitContext(remoteURL)
	case urlutil.IsURL(remoteURL):
		context, err = builder.MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
			httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
				dockerfile, err := ioutil.ReadAll(rc)
				if err != nil {
					return nil, err
				}

				// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile,
				// in which case the caller should use dockerfileName as the new name for the Dockerfile,
				// irrespective of any other user input.
				dockerfileName = api.DefaultDockerfileName

				// TODO: return a context without tarsum
				return archive.Generate(dockerfileName, string(dockerfile))
			},
			// fallback handler (tar context)
			"": func(rc io.ReadCloser) (io.ReadCloser, error) {
				return createProgressReader(rc), nil
			},
		})
	default:
		err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
	}
	return
}
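// A minimal caller sketch for DetectContextFromRemoteURL, assuming it is reached from a
// build endpoint that already holds the request body. The pass-through progress-reader
// factory is hypothetical; in the daemon it would wrap `in` with a progress bar.
func detectContextExample(body io.ReadCloser, remoteURL string) (builder.ModifiableContext, string, error) {
	createProgressReader := func(in io.ReadCloser) io.ReadCloser {
		return in // no-op wrapper keeps the sketch self-contained
	}
	return DetectContextFromRemoteURL(body, remoteURL, createProgressReader)
}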
// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image.
//
// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to a local file relative to the docker client. If the URL is '-', then the tar file is read from STDIN.
//
// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
func (cli *DockerCli) CmdImport(args ...string) error {
	cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true)
	flChanges := opts.NewListOpts(nil)
	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
	message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image")
	cmd.Require(flag.Min, 1)
	cmd.ParseFlags(args, true)

	var (
		in         io.Reader
		tag        string
		src        = cmd.Arg(0)
		srcName    = src
		repository = cmd.Arg(1)
		changes    = flChanges.GetAll()
	)

	if cmd.NArg() == 3 {
		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. Please use file|URL|- [REPOSITORY[:TAG]]\n")
		tag = cmd.Arg(2)
	}

	if repository != "" {
		// Check if the given image name can be resolved
		if _, err := reference.ParseNamed(repository); err != nil {
			return err
		}
	}

	if src == "-" {
		in = cli.in
	} else if !urlutil.IsURL(src) {
		srcName = "-"
		file, err := os.Open(src)
		if err != nil {
			return err
		}
		defer file.Close()
		in = file
	}

	options := types.ImageImportOptions{
		Source:         in,
		SourceName:     srcName,
		RepositoryName: repository,
		Message:        *message,
		Tag:            tag,
		Changes:        changes,
	}

	responseBody, err := cli.client.ImageImport(options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut)
}
// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image.
//
// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to a local file relative to the docker client. If the URL is '-', then the tar file is read from STDIN.
//
// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
func (cli *DockerCli) CmdImport(args ...string) error {
	cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true)
	flChanges := opts.NewListOpts(nil)
	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
	message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image")
	cmd.Require(flag.Min, 1)
	cmd.ParseFlags(args, true)

	var (
		v          = url.Values{}
		src        = cmd.Arg(0)
		repository = cmd.Arg(1)
	)

	v.Set("fromSrc", src)
	v.Set("repo", repository)
	v.Set("message", *message)
	for _, change := range flChanges.GetAll() {
		v.Add("changes", change)
	}
	if cmd.NArg() == 3 {
		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. Please use file|URL|- [REPOSITORY[:TAG]]\n")
		v.Set("tag", cmd.Arg(2))
	}

	if repository != "" {
		// Check if the given image name can be resolved
		repo, _ := parsers.ParseRepositoryTag(repository)
		if err := registry.ValidateRepositoryName(repo); err != nil {
			return err
		}
	}

	var in io.Reader
	if src == "-" {
		in = cli.in
	} else if !urlutil.IsURL(src) {
		v.Set("fromSrc", "-")
		file, err := os.Open(src)
		if err != nil {
			return err
		}
		defer file.Close()
		in = file
	}

	sopts := &streamOpts{
		rawTerminal: true,
		in:          in,
		out:         cli.out,
	}

	_, err := cli.stream("POST", "/images/create?"+v.Encode(), sopts)
	return err
}
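// Small, standard-library-only illustration of how the /images/create query string above
// is assembled; the repository name and Dockerfile instructions are made up.
func buildImportQueryExample() string {
	v := url.Values{}
	v.Set("fromSrc", "-")                 // "-" means: read the tarball from the request body
	v.Set("repo", "example/app")          // hypothetical repository
	v.Add("changes", `CMD ["/bin/sh"]`)   // repeated "changes" keys become repeated parameters
	v.Add("changes", "EXPOSE 8080")
	return "/images/create?" + v.Encode() // keys are encoded in sorted order
}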
// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image.
//
// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to a local file relative to the docker client. If the URL is '-', then the tar file is read from STDIN.
//
// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
func (cli *DockerCli) CmdImport(args ...string) error {
	cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true)
	flChanges := opts.NewListOpts(nil)
	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
	message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image")
	cmd.Require(flag.Min, 1)
	cmd.ParseFlags(args, true)

	var (
		in      io.Reader
		tag     string
		src     = cmd.Arg(0)
		srcName = src
		ref     = cmd.Arg(1)
		changes = flChanges.GetAll()
	)

	if cmd.NArg() == 3 {
		// FIXME(vdemeester) In which version was this deprecated? Should we remove it?
		fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. Please use file|URL|- [REPOSITORY[:TAG]]\n")
		tag = cmd.Arg(2)
	}

	if src == "-" {
		in = cli.in
	} else if !urlutil.IsURL(src) {
		srcName = "-"
		file, err := os.Open(src)
		if err != nil {
			return err
		}
		defer file.Close()
		in = file
	}

	source := types.ImageImportSource{
		Source:     in,
		SourceName: srcName,
	}

	options := types.ImageImportOptions{
		Message: *message,
		Tag:     tag,
		Changes: changes,
	}

	responseBody, err := cli.client.ImageImport(context.Background(), source, ref, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil)
}
func runImport(dockerCli *client.DockerCli, opts importOptions) error {
	var (
		in      io.Reader
		srcName = opts.source
	)

	if opts.source == "-" {
		in = dockerCli.In()
	} else if !urlutil.IsURL(opts.source) {
		srcName = "-"
		file, err := os.Open(opts.source)
		if err != nil {
			return err
		}
		defer file.Close()
		in = file
	}

	source := types.ImageImportSource{
		Source:     in,
		SourceName: srcName,
	}

	options := types.ImageImportOptions{
		Message: opts.message,
		Changes: opts.changes,
	}

	clnt := dockerCli.Client()

	responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	return jsonmessage.DisplayJSONMessagesStream(responseBody, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil)
}
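// Sketch of the client call that both CmdImport and runImport above converge on; the
// signature (context, source, reference, options) matches its use in runImport. The file
// name and reference here are placeholders.
func importFromFileExample(clnt client.APIClient) error {
	file, err := os.Open("rootfs.tar") // hypothetical local tarball
	if err != nil {
		return err
	}
	defer file.Close()

	source := types.ImageImportSource{
		Source:     file,
		SourceName: "-", // tell the daemon to read the tarball from the request body
	}
	responseBody, err := clnt.ImageImport(context.Background(), source, "example/app:latest", types.ImageImportOptions{})
	if err != nil {
		return err
	}
	defer responseBody.Close()

	_, err = io.Copy(ioutil.Discard, responseBody) // drain the JSON progress stream
	return err
}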
func parseURL(ctx logger.Context) (*url.URL, error) {
	splunkURLStr, ok := ctx.Config[splunkURLKey]
	if !ok {
		return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey)
	}

	splunkURL, err := url.Parse(splunkURLStr)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey)
	}

	if !urlutil.IsURL(splunkURLStr) ||
		!splunkURL.IsAbs() ||
		(splunkURL.Path != "" && splunkURL.Path != "/") ||
		splunkURL.RawQuery != "" ||
		splunkURL.Fragment != "" {
		return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey)
	}

	splunkURL.Path = "/services/collector/event/1.0"

	return splunkURL, nil
}
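// A few concrete inputs against parseURL, as a hedged sketch; it assumes logger.Context
// carries its driver options in a Config map keyed by splunkURLKey, as used above.
func parseURLExample() {
	for _, raw := range []string{
		"https://splunk.example.com:8088",      // accepted; path is rewritten to /services/collector/event/1.0
		"https://splunk.example.com:8088/logs", // rejected: non-root path
		"splunk.example.com:8088",              // rejected: not recognized as an absolute URL
	} {
		ctx := logger.Context{Config: map[string]string{splunkURLKey: raw}}
		if u, err := parseURL(ctx); err != nil {
			fmt.Println(raw, "->", err)
		} else {
			fmt.Println(raw, "->", u.String())
		}
	}
}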
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = strslice.New("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.runConfig.Cmd = strslice.New("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}

	for _, info := range infos {
		if err := b.docker.BuilderCopy(container.ID, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	return b.commit(container.ID, cmd, comment)
}
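// Standalone restatement of the "multi:" cache key computed above: per-source hashes are
// joined with commas and hashed once, so a change to any source (or to their order)
// invalidates the cached layer. Pure standard library; the inputs are made up.
func multiSourceCacheKey(hashes []string) string {
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(hashes, ",")))
	return "multi:" + hex.EncodeToString(hasher.Sum(nil))
}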
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")

	cmd.Require(flag.Exact, 1)
	cmd.ParseFlags(args, true)

	var (
		context  archive.Archive
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	if cmd.Arg(0) == "-" {
		// As a special case, 'docker build -' will build from either an empty context with the
		// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
		buf := bufio.NewReader(cli.in)
		magic, err := buf.Peek(tarHeaderSize)
		if err != nil && err != io.EOF {
			return fmt.Errorf("failed to peek context header from STDIN: %v", err)
		}
		if !archive.IsArchive(magic) {
			dockerfile, err := ioutil.ReadAll(buf)
			if err != nil {
				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
			}

			// -f option has no meaning when we're reading it from stdin,
			// so just use our default Dockerfile name
			*dockerfileName = api.DefaultDockerfileName
			context, err = archive.Generate(*dockerfileName, string(dockerfile))
		} else {
			context = ioutil.NopCloser(buf)
		}
	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
		isRemote = true
	} else {
		root := cmd.Arg(0)
		if urlutil.IsGitURL(root) {
			root, err = utils.GitClone(root)
			if err != nil {
				return err
			}
			defer os.RemoveAll(root)
		}
		if _, err := os.Stat(root); err != nil {
			return err
		}

		absRoot, err := filepath.Abs(root)
		if err != nil {
			return err
		}

		filename := *dockerfileName // path to Dockerfile

		if *dockerfileName == "" {
			// No -f/--file was specified so use the default
			*dockerfileName = api.DefaultDockerfileName
			filename = filepath.Join(absRoot, *dockerfileName)

			// Just to be nice ;-) look for 'dockerfile' too but only
			// use it if we found it, otherwise ignore this check
			if _, err = os.Lstat(filename); os.IsNotExist(err) {
				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
				if _, err = os.Lstat(tmpFN); err == nil {
					*dockerfileName = strings.ToLower(*dockerfileName)
					filename = tmpFN
				}
			}
		}

		origDockerfile := *dockerfileName // used for error msg
		if filename, err = filepath.Abs(filename); err != nil {
			return err
		}

		// Verify that 'filename' is within the build context
		filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
		if err != nil {
			return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
		}

		// Now reset the dockerfileName to be relative to the build context
		*dockerfileName, err = filepath.Rel(absRoot, filename)
		if err != nil {
			return err
		}
		// And canonicalize dockerfile name to a platform-independent one
		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
		if err != nil {
			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", *dockerfileName, err)
		}

		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
		}
		var includes = []string{"."}

		excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore"))
		if err != nil {
			return err
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile.
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(*dockerfileName, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", *dockerfileName)
		}

		if err := utils.ValidateContextDirectory(root, excludes); err != nil {
			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
		}
		options := &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		}
		context, err = archive.TarWithOptions(root, options)
		if err != nil {
			return err
		}
	}

	// windows: show error message about modified file permissions
	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
	if runtime.GOOS == "windows" {
		logrus.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter(false)
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       cli.out,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	// Send the build context
	v := &url.Values{}

	// Check if the given image name can be resolved
	if *tag != "" {
		repository, tag := parsers.ParseRepositoryTag(*tag)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	v.Set("t", *tag)

	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}
	if *forceRm {
		v.Set("forcerm", "1")
	}
	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", *dockerfileName)

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))

	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}

	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}

	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}
	return err
}
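// Sketch of the memory-flag parsing used by CmdBuild above; units.RAMInBytes accepts
// human-readable sizes such as "512m" or "1g". The sample values are made up.
func parseMemoryFlagsExample() (memory, memorySwap int64, err error) {
	if memory, err = units.RAMInBytes("512m"); err != nil { // 536870912 bytes
		return 0, 0, err
	}

	swapFlag := "1g" // "-1" is special-cased before parsing: it disables swap limiting
	if swapFlag == "-1" {
		memorySwap = -1
	} else if memorySwap, err = units.RAMInBytes(swapFlag); err != nil {
		return 0, 0, err
	}
	return memory, memorySwap, nil
}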
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	// Create the Pod
	podId := fmt.Sprintf("buildpod-%s", utils.RandStr(10, "alpha"))
	tempSrcDir := fmt.Sprintf("/var/run/hyper/temp/%s/", podId)
	if err := os.MkdirAll(tempSrcDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	if _, err := os.Stat(tempSrcDir); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	shellDir := fmt.Sprintf("/var/run/hyper/shell/%s/", podId)
	if err := os.MkdirAll(shellDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	copyshell, err1 := os.Create(shellDir + "/exec-copy.sh")
	if err1 != nil {
		glog.Errorf(err1.Error())
		return err1
	}
	fmt.Fprintf(copyshell, "#!/bin/sh\n")

	podString, err := MakeCopyPod(podId, b.image, b.runConfig.WorkingDir, tempSrcDir, dest, shellDir)
	if err != nil {
		return err
	}

	err = b.Hyperdaemon.CreatePod(podId, podString, false)
	if err != nil {
		return err
	}

	// Get the container
	var (
		containerId string
		container   *daemon.Container
	)

	ps, ok := b.Hyperdaemon.PodList.GetStatus(podId)
	if !ok {
		return fmt.Errorf("Cannot find pod %s", podId)
	}
	for _, i := range ps.Containers {
		containerId = i.Id
	}
	container, err = b.Daemon.Get(containerId)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	/*
		container, _, err := b.docker.Create(b.runConfig, nil)
		if err != nil {
			return err
		}
		defer b.docker.Unmount(container)
	*/
	b.tmpPods[podId] = struct{}{}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}

	for _, info := range infos {
		if err := b.docker.Copy(container, tempSrcDir, info.FileInfo, info.decompress); err != nil {
			return err
		}
		if strings.HasSuffix(dest, string(os.PathSeparator)) {
			fmt.Fprintf(copyshell, "cp /tmp/src/%s %s\n", info.FileInfo.Name(), filepath.Join(dest, info.FileInfo.Name()))
		} else {
			fmt.Fprintf(copyshell, "cp /tmp/src/%s %s\n", info.FileInfo.Name(), dest)
		}
	}

	fmt.Fprintf(copyshell, "umount /tmp/src/\n")
	fmt.Fprintf(copyshell, "umount /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/src/\n")
	copyshell.Close()

	// start or replace pod
	vm, ok := b.Hyperdaemon.VmList[b.Name]
	if !ok {
		glog.Warningf("can not find VM(%s)", b.Name)

		bo := &hypervisor.BootConfig{
			CPU:    1,
			Memory: 512,
			Kernel: b.Hyperdaemon.Kernel,
			Initrd: b.Hyperdaemon.Initrd,
			Bios:   b.Hyperdaemon.Bios,
			Cbfs:   b.Hyperdaemon.Cbfs,
			Vbox:   b.Hyperdaemon.VboxImage,
		}

		vm = b.Hyperdaemon.NewVm(b.Name, 1, 512, false, types.VM_KEEP_AFTER_FINISH)

		err = vm.Launch(bo)
		if err != nil {
			return err
		}

		b.Hyperdaemon.AddVm(vm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := b.Hyperdaemon.StartPod(podId, "", b.Name, nil, false, false, types.VM_KEEP_AFTER_FINISH, []*hypervisor.TtyIO{})
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		vm = b.Hyperdaemon.VmList[b.Name]

		// wait for cmd finish
		Status, err := vm.GetResponseChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		defer vm.ReleaseResponseChan(Status)

		var vmResponse *types.VmResponse
		for {
			vmResponse = <-Status
			if vmResponse.VmId == b.Name {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		pod, ok := b.Hyperdaemon.PodList.Get(podId)
		if !ok {
			return fmt.Errorf("Cannot find pod %s", podId)
		}
		pod.SetVM(b.Name, vm)

		// release pod from VM
		glog.Warningf("start stop pod")
		code, cause, err = b.Hyperdaemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		glog.Warningf("stop pod finish")
	} else {
		glog.Errorf("Vm is not IDLE")
		return fmt.Errorf("Vm is not IDLE")
	}

	if err := b.commit(container.ID, cmd, comment); err != nil {
		return err
	}

	return nil
}
// Build is the main interface of the package; it gathers the Builder
// struct and calls builder.Run() to do all the real build work.
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)

	sf := streamformatter.NewJSONStreamFormatter()

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
		}
		defer f.Body.Close()
		ct := f.Header.Get("Content-Type")
		clen := int(f.ContentLength)
		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)

		defer bodyReader.Close()
		if err != nil {
			return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
		}

		if contentType == httputils.MimeTypes.TextPlain {
			dockerFile, err := ioutil.ReadAll(bodyReader)
			if err != nil {
				return err
			}

			// When we're downloading just a Dockerfile put it in
			// the default name - don't allow the client to move/specify it
			buildConfig.DockerfileName = api.DefaultDockerfileName

			c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
			if err != nil {
				return err
			}
			context = c
		} else {
			// Pass through - this is a pre-packaged context, presumably
			// with a Dockerfile with the right name inside it.
			prCfg := progressreader.Config{
				In:        bodyReader,
				Out:       buildConfig.Stdout,
				Formatter: sf,
				Size:      clen,
				NewLines:  true,
				ID:        "Downloading context",
				Action:    buildConfig.RemoteURL,
			}
			context = progressreader.New(prCfg)
		}
	}
	defer context.Close()

	builder := &builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfigs:     buildConfig.AuthConfigs,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CPUShares,
		cpuPeriod:       buildConfig.CPUPeriod,
		cpuQuota:        buildConfig.CPUQuota,
		cpuSetCpus:      buildConfig.CPUSetCpus,
		cpuSetMems:      buildConfig.CPUSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
		id:              stringid.GenerateRandomID(),
	}

	defer func() {
		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
	}()

	id, err := builder.Run(context)
	if err != nil {
		return err
	}

	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
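// Hedged sketch of the contract Build relies on from inspectResponse: given the declared
// Content-Type and the body, decide whether the remote is a plain-text Dockerfile or an
// archive, and hand back an equivalent reader. This illustrates the idea with
// http.DetectContentType; it is not the real inspectResponse implementation.
func sniffRemoteContext(declared string, body io.ReadCloser) (string, io.ReadCloser, error) {
	buf := bufio.NewReader(body)
	head, err := buf.Peek(512) // DetectContentType looks at no more than 512 bytes
	if err != nil && err != io.EOF {
		return "", body, err
	}

	contentType := declared
	if contentType == "" {
		contentType = http.DetectContentType(head) // e.g. "text/plain; charset=utf-8"
	}
	if strings.HasPrefix(contentType, "text/plain") {
		contentType = "text/plain"
	}
	return contentType, ioutil.NopCloser(buf), nil
}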
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		dockerfileName = job.Getenv("dockerfile")
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		pull           = job.GetenvBool("pull")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)

	repoName, tag = parsers.ParseRepositoryTag(repoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return job.Error(err)
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return job.Error(err)
			}
		}
	}

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if urlutil.IsGitURL(remoteURL) {
		if !urlutil.IsGitTransport(remoteURL) {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if urlutil.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		dockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(dockerfileName, string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		Pull:            pull,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
		dockerfileName:  dockerfileName,
	}

	id, err := builder.Run(context)
	if err != nil {
		return job.Error(err)
	}

	if repoName != "" {
		b.Daemon.Repositories().Set(repoName, tag, id, true)
	}
	return engine.StatusOK
}
func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
	var (
		buildCtx io.ReadCloser
		err      error
	)

	specifiedContext := options.context

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = dockerCli.Out()
	buildBuff = dockerCli.Out()
	if options.quiet {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName)
	case urlutil.IsGitURL(specifiedContext):
		tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName)
	case urlutil.IsURL(specifiedContext):
		buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
	default:
		contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
	}

	if err != nil {
		if options.quiet && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(dockerCli.Err(), progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	if buildCtx == nil {
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
		if err != nil {
			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
		}

		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		defer f.Close()

		var excludes []string
		if err == nil {
			excludes, err = dockerignore.ReadAll(f)
			if err != nil {
				return err
			}
		}

		if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil {
			return fmt.Errorf("Error checking context: '%s'.", err)
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile. Ignore errors here, as they will have been
		// caught by validateContextDirectory above.
		var includes = []string{"."}
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", relDockerfile)
		}

		buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		})
		if err != nil {
			return err
		}
	}

	ctx := context.Background()

	var resolvedTags []*resolvedTag
	if command.IsTrusted() {
		translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
			return TrustedReference(ctx, dockerCli, ref)
		}
		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
	}

	// Set up an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)
	if !dockerCli.Out().IsTerminal() {
		progressOutput = &lastProgressOutput{output: progressOutput}
	}

	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if options.memory != "" {
		parsedMemory, err := units.RAMInBytes(options.memory)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if options.memorySwap != "" {
		if options.memorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(options.memorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if options.shmSize != "" {
		shmSize, err = units.RAMInBytes(options.shmSize)
		if err != nil {
			return err
		}
	}

	authConfig, _ := dockerCli.CredentialsStore().GetAll()
	buildOptions := types.ImageBuildOptions{
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           options.tags.GetAll(),
		SuppressOutput: options.quiet,
		NoCache:        options.noCache,
		Remove:         options.rm,
		ForceRemove:    options.forceRm,
		PullParent:     options.pull,
		Isolation:      container.Isolation(options.isolation),
		CPUSetCPUs:     options.cpuSetCpus,
		CPUSetMems:     options.cpuSetMems,
		CPUShares:      options.cpuShares,
		CPUQuota:       options.cpuQuota,
		CPUPeriod:      options.cpuPeriod,
		CgroupParent:   options.cgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        options.ulimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()),
		AuthConfigs:    authConfig,
		Labels:         runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
		CacheFrom:      options.cacheFrom,
	}

	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
	if err != nil {
		if options.quiet {
			fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
		}
		return err
	}
	defer response.Body.Close()

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if options.quiet {
				fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
			}
			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
		fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if options.quiet {
		fmt.Fprintf(dockerCli.Out(), "%s", buildBuff)
	}

	if command.IsTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		buildConfig.DockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
		if err != nil {
			return err
		}
		context = c
	}
	defer context.Close()

	sf := streamformatter.NewJSONStreamFormatter()

	builder := &Builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfig:      buildConfig.AuthConfig,
		ConfigFile:      buildConfig.ConfigFile,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CpuShares,
		cpuPeriod:       buildConfig.CpuPeriod,
		cpuQuota:        buildConfig.CpuQuota,
		cpuSetCpus:      buildConfig.CpuSetCpus,
		cpuSetMems:      buildConfig.CpuSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return err
	}

	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached). Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
	} else {
		b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)}
	}
	defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))
	if runtime.GOOS == "windows" {
		// On Windows, this is more complicated. We are guaranteed that the
		// WorkingDir is already platform consistent meaning in the format
		// UPPERCASEDriveLetter-Colon-Backslash-Foldername. However, Windows
		// for now also has the limitation that ADD/COPY can only be done to
		// the C: (system) drive, not any drives that might be present as a
		// result of bind mounts.
		//
		// So... if the path specified is Linux-style absolute (/foo or \\foo),
		// we assume it is the system drive. If it is a Windows-style absolute
		// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
		// strip any configured working directory's drive letter so that it
		// can be subsequently legitimately converted to a Windows volume-style
		// pathname.

		// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
		// we only want to validate where the DriveColon part has been supplied.
		if filepath.IsAbs(dest) {
			if strings.ToUpper(string(dest[0])) != "C" {
				return fmt.Errorf("Windows does not support %s with a destination not on the system drive (C:)", cmdName)
			}
			dest = dest[2:] // Strip the drive letter
		}

		// Cannot handle relative where WorkingDir is not the system drive.
		if len(b.runConfig.WorkingDir) > 0 {
			if !system.IsAbs(b.runConfig.WorkingDir[2:]) {
				return fmt.Errorf("Current WorkingDir %s is not platform consistent", b.runConfig.WorkingDir)
			}
			if !system.IsAbs(dest) {
				if string(b.runConfig.WorkingDir[0]) != "C" {
					return fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName)
				}

				dest = filepath.Join(string(os.PathSeparator), b.runConfig.WorkingDir[2:], dest)

				// Make sure we preserve any trailing slash
				if endsInSlash {
					dest += string(os.PathSeparator)
				}
			}
		}
	} else {
		if !system.IsAbs(dest) {
			dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

			// Make sure we preserve any trailing slash
			if endsInSlash {
				dest += string(os.PathSeparator)
			}
		}
	}

	for _, info := range infos {
		if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	return b.commit(container.ID, cmd, comment)
}
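// Worked example of the non-Windows destination twiddling above, using filepath.IsAbs in
// place of system.IsAbs to stay standard-library-only: a relative dest is joined onto the
// WORKDIR and a trailing separator is preserved.
func twiddleDestExample(workingDir, dest string) string {
	endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))
	if !filepath.IsAbs(dest) {
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
		if endsInSlash {
			dest += string(os.PathSeparator) // e.g. ("/app", "static/") -> "/app/static/"
		}
	}
	return dest
}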
// IsValidRemote checks if the specified string is a valid remote (for builds)
func IsValidRemote(remote string) bool {
	return urlutil.IsGitURL(remote) || urlutil.IsURL(remote)
}
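// Example inputs for IsValidRemote; both git remotes and plain HTTP(S) URLs count as
// valid build remotes, while local paths do not. The sample values are made up.
func isValidRemoteExample() {
	for _, remote := range []string{
		"git@github.com:docker/docker.git", // git remote -> true
		"https://example.com/ctx.tar.gz",   // plain URL -> true
		"./some/local/dir",                 // local path -> false
	} {
		fmt.Println(remote, IsValidRemote(remote))
	}
}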
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
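// Standalone restatement of the filename derivation above: when ADD's destination ends in
// "/", the file name is taken from the last path segment of the source URL. Pure standard
// library; the example URL is made up.
func filenameFromURL(rawurl string) (string, error) {
	u, err := url.Parse(rawurl)
	if err != nil {
		return "", err
	}
	p := strings.TrimSuffix(u.Path, "/")
	parts := strings.Split(p, "/")
	filename := parts[len(parts)-1]
	if filename == "" {
		return "", fmt.Errorf("cannot determine filename from url: %s", u)
	}
	return filename, nil // e.g. "https://example.com/a/b.tar" -> "b.tar"
}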
// CmdBuild builds a new image from the source code at a given path. // // If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN. // // Usage: docker build [OPTIONS] PATH | URL | - func (cli *DockerCli) CmdBuild(args ...string) error { cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true) flTags := opts.NewListOpts(validateTag) cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format") suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success") noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image") rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build") forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB") flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period") flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") flBuildArg := opts.NewListOpts(runconfigopts.ValidateEnv) cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables") isolation := cmd.String([]string{"-isolation"}, "", "Container isolation level") ulimits := make(map[string]*units.Ulimit) flUlimits := runconfigopts.NewUlimitOpt(&ulimits) cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") cmd.Require(flag.Exact, 1) // For trusted pull on "FROM <image>" instruction. 
addTrustedFlags(cmd, true) cmd.ParseFlags(args, true) var ( context io.ReadCloser isRemote bool err error ) _, err = exec.LookPath("git") hasGit := err == nil specifiedContext := cmd.Arg(0) var ( contextDir string tempDir string relDockerfile string progBuff io.Writer buildBuff io.Writer ) progBuff = cli.out buildBuff = cli.out if *suppressOutput { progBuff = bytes.NewBuffer(nil) buildBuff = bytes.NewBuffer(nil) } switch { case specifiedContext == "-": context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName) case urlutil.IsGitURL(specifiedContext) && hasGit: tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName) case urlutil.IsURL(specifiedContext): context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName) default: contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName) } if err != nil { if *suppressOutput && urlutil.IsURL(specifiedContext) { fmt.Fprintln(cli.err, progBuff) } return fmt.Errorf("unable to prepare context: %s", err) } if tempDir != "" { defer os.RemoveAll(tempDir) contextDir = tempDir } if context == nil { // And canonicalize dockerfile name to a platform-independent one relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) if err != nil { return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) } f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) if err != nil && !os.IsNotExist(err) { return err } var excludes []string if err == nil { excludes, err = dockerignore.ReadAll(f) if err != nil { return err } } if err := validateContextDirectory(contextDir, excludes); err != nil { return fmt.Errorf("Error checking context: '%s'.", err) } // If .dockerignore mentions .dockerignore or the Dockerfile // then make sure we send both files over to the daemon // because Dockerfile is, obviously, needed no matter what, and // .dockerignore is needed to know if either one needs to be // removed. The daemon will remove them for us, if needed, after it // parses the Dockerfile. Ignore errors here, as they will have been // caught by validateContextDirectory above. var includes = []string{"."} keepThem1, _ := fileutils.Matches(".dockerignore", excludes) keepThem2, _ := fileutils.Matches(relDockerfile, excludes) if keepThem1 || keepThem2 { includes = append(includes, ".dockerignore", relDockerfile) } context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: excludes, IncludeFiles: includes, }) if err != nil { return err } } var resolvedTags []*resolvedTag if isTrusted() { // Wrap the tar archive to replace the Dockerfile entry with the rewritten // Dockerfile which uses trusted pulls. 
		context = replaceDockerfileTarWrapper(context, relDockerfile, cli.trustedReference, &resolvedTags)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)

	var body io.Reader = progress.NewProgressReader(context, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if *flShmSize != "" {
		shmSize, err = units.RAMInBytes(*flShmSize)
		if err != nil {
			return err
		}
	}

	var remoteContext string
	if isRemote {
		remoteContext = cmd.Arg(0)
	}

	options := types.ImageBuildOptions{
		Context:        body,
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           flTags.GetAll(),
		SuppressOutput: *suppressOutput,
		RemoteContext:  remoteContext,
		NoCache:        *noCache,
		Remove:         *rm,
		ForceRemove:    *forceRm,
		PullParent:     *pull,
		IsolationLevel: container.IsolationLevel(*isolation),
		CPUSetCPUs:     *flCPUSetCpus,
		CPUSetMems:     *flCPUSetMems,
		CPUShares:      *flCPUShares,
		CPUQuota:       *flCPUQuota,
		CPUPeriod:      *flCPUPeriod,
		CgroupParent:   *flCgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        flUlimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()),
		AuthConfigs:    cli.configFile.AuthConfigs,
	}

	response, err := cli.client.ImageBuild(options)
	if err != nil {
		return err
	}

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if *suppressOutput {
				fmt.Fprintf(cli.err, "%s%s", progBuff, buildBuff)
			}
			return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" {
		fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if *suppressOutput {
		fmt.Fprintf(cli.out, "%s", buildBuff)
	}

	if isTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
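// The .dockerignore handling in CmdBuild above is easy to get wrong: excluded
// paths are dropped from the tar context, but the Dockerfile and .dockerignore
// itself must still reach the daemon, which parses and then removes them.
// Below is a minimal, self-contained sketch of just that include/exclude
// decision. It is illustrative only: the helper name contextIncludes is
// hypothetical, and only fileutils.Matches is taken from the real code.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

// contextIncludes mirrors the logic in CmdBuild: start from "." and re-include
// .dockerignore and the Dockerfile whenever the exclude patterns would
// otherwise strip them from the build context.
func contextIncludes(relDockerfile string, excludes []string) []string {
	includes := []string{"."}
	keepIgnoreFile, _ := fileutils.Matches(".dockerignore", excludes)
	keepDockerfile, _ := fileutils.Matches(relDockerfile, excludes)
	if keepIgnoreFile || keepDockerfile {
		includes = append(includes, ".dockerignore", relDockerfile)
	}
	return includes
}

func main() {
	// "*" excludes everything, so both files are re-included explicitly.
	fmt.Println(contextIncludes("Dockerfile", []string{"*"}))
	// Nothing relevant is excluded, so "." alone suffices.
	fmt.Println(contextIncludes("Dockerfile", []string{"vendor"}))
}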
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true)
	flTags := opts.NewListOpts(validateTag)
	cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
	flBuildArg := opts.NewListOpts(opts.ValidateEnv)
	cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")

	ulimits := make(map[string]*ulimit.Ulimit)
	flUlimits := opts.NewUlimitOpt(&ulimits)
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")

	cmd.Require(flag.Exact, 1)

	// For trusted pull on "FROM <image>" instruction.
	addTrustedFlags(cmd, true)

	cmd.ParseFlags(args, true)

	var (
		context  io.ReadCloser
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	specifiedContext := cmd.Arg(0)

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
	)

	switch {
	case specifiedContext == "-":
		tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
	case urlutil.IsGitURL(specifiedContext) && hasGit:
		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
	case urlutil.IsURL(specifiedContext):
		tempDir, relDockerfile, err = getContextFromURL(cli.out, specifiedContext, *dockerfileName)
	default:
		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
	}

	if err != nil {
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	// Resolve the FROM lines in the Dockerfile to trusted digest references
	// using Notary. On a successful build, we must tag the resolved digests
	// to the original name specified in the Dockerfile.
	newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
	if err != nil {
		return fmt.Errorf("unable to process Dockerfile: %v", err)
	}
	defer newDockerfile.Close()

	// And canonicalize dockerfile name to a platform-independent one
	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
	if err != nil {
		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
	}

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	var excludes []string
	if err == nil {
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile. Ignore errors here, as they will have been
	// caught by ValidateContextDirectory above.
	var includes = []string{"."}
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", relDockerfile)
	}

	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	})
	if err != nil {
		return err
	}

	// Wrap the tar archive to replace the Dockerfile entry with the rewritten
	// Dockerfile which uses trusted pulls.
	context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)

	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	sf := streamformatter.NewStreamFormatter()
	var body io.Reader = progressreader.New(progressreader.Config{
		In:        context,
		Out:       cli.out,
		Formatter: sf,
		NewLines:  true,
		ID:        "",
		Action:    "Sending build context to Docker daemon",
	})

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	// Send the build context
	v := url.Values{
		"t": flTags.GetAll(),
	}
	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}
	if *forceRm {
		v.Set("forcerm", "1")
	}
	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCPUQuota, 10))
	v.Set("cpuperiod", strconv.FormatInt(*flCPUPeriod, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)
	v.Set("dockerfile", relDockerfile)

	ulimitsVar := flUlimits.GetList()
	ulimitsJSON, err := json.Marshal(ulimitsVar)
	if err != nil {
		return err
	}
	v.Set("ulimits", string(ulimitsJSON))

	// collect all the build-time environment variables for the container
	buildArgs := runconfig.ConvertKVStringsToMap(flBuildArg.GetAll())
	buildArgsJSON, err := json.Marshal(buildArgs)
	if err != nil {
		return err
	}
	v.Set("buildargs", string(buildArgsJSON))

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
	headers.Set("Content-Type", "application/tar")

	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}

	serverResp, err := cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)

	// Windows: show error message about modified file permissions.
	if runtime.GOOS == "windows" {
		h, err := httputils.ParseServerHeader(serverResp.header.Get("Server"))
		if err == nil {
			if h.OS != "windows" {
				fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
			}
		}
	}

	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}

	if err != nil {
		return err
	}

	// Since the build was successful, now we must tag any of the resolved
	// images from the above Dockerfile rewrite.
	for _, resolved := range resolvedTags {
		if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
			return err
		}
	}

	return nil
}
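// For contrast with the newer client wrapper used in the first CmdBuild, this
// older variant drives the daemon's REST API directly: every flag becomes a
// query parameter on POST /build, and registry credentials travel
// base64-encoded in the X-Registry-Config header rather than in the URL.
// A minimal standard-library sketch of that encoding follows; the concrete
// flag values are invented purely for illustration.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

func main() {
	v := url.Values{}
	v.Add("t", "example/app:latest") // repeated "t" keys carry multiple tags
	v.Set("dockerfile", "Dockerfile")
	v.Set("nocache", "1")
	v.Set("memory", strconv.FormatInt(64*1024*1024, 10)) // limits are sent as byte counts

	// Credentials never go in the URL: they are JSON-marshaled and
	// base64-encoded into the X-Registry-Config header.
	authConfigs := map[string]string{} // stands in for cli.configFile.AuthConfigs
	buf, err := json.Marshal(authConfigs)
	if err != nil {
		panic(err)
	}
	headers := http.Header{}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
	headers.Set("Content-Type", "application/tar")

	fmt.Println("POST /build?" + v.Encode())
	fmt.Println(headers)
}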