func TestMakeRemoteContext(t *testing.T) {
	contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test")
	defer cleanup()

	createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777)

	mux := http.NewServeMux()
	server := httptest.NewServer(mux)
	serverURL, _ := url.Parse(server.URL)

	serverURL.Path = "/" + DefaultDockerfileName
	remoteURL := serverURL.String()

	mux.Handle("/", http.FileServer(http.Dir(contextDir)))

	remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
		httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
			dockerfile, err := ioutil.ReadAll(rc)
			if err != nil {
				return nil, err
			}

			r, err := archive.Generate(DefaultDockerfileName, string(dockerfile))
			if err != nil {
				return nil, err
			}

			return ioutil.NopCloser(r), nil
		},
	})

	if err != nil {
		t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err)
	}

	if remoteContext == nil {
		t.Fatalf("Remote context should not be nil")
	}

	tarSumCtx, ok := remoteContext.(*tarSumContext)

	if !ok {
		t.Fatalf("Cast error, remote context should be casted to tarSumContext")
	}

	fileInfoSums := tarSumCtx.sums

	if fileInfoSums.Len() != 1 {
		t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len())
	}

	fileInfo := fileInfoSums.GetFile(DefaultDockerfileName)

	if fileInfo == nil {
		t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName)
	}

	if fileInfo.Pos() != 0 {
		t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos())
	}
}
// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
// irrespective of user input.
// createProgressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context builder.ModifiableContext, dockerfileName string, err error) {
	switch {
	case remoteURL == "":
		context, err = builder.MakeTarSumContext(r)
	case urlutil.IsGitURL(remoteURL):
		context, err = builder.MakeGitContext(remoteURL)
	case urlutil.IsURL(remoteURL):
		context, err = builder.MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
			httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
				dockerfile, err := ioutil.ReadAll(rc)
				if err != nil {
					return nil, err
				}

				// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
				// should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
				dockerfileName = api.DefaultDockerfileName

				// TODO: return a context without tarsum
				return archive.Generate(dockerfileName, string(dockerfile))
			},
			// fallback handler (tar context)
			"": func(rc io.ReadCloser) (io.ReadCloser, error) {
				return createProgressReader(rc), nil
			},
		})
	default:
		err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
	}
	return
}
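A minimal, hypothetical caller sketch for DetectContextFromRemoteURL follows. The helper name buildContextFromRequest and the pass-through progress reader are illustrative only and are not part of the original source; the real daemon wraps remote downloads in a progressreader instead of passing the stream through unchanged.

// buildContextFromRequest is a hypothetical helper showing one way a caller might consume
// DetectContextFromRemoteURL. It is a sketch, not the actual call site in the daemon.
// (assumes "io" and the builder package from above are imported)
func buildContextFromRequest(body io.ReadCloser, remoteURL string) (builder.ModifiableContext, string, error) {
	createProgressReader := func(in io.ReadCloser) io.ReadCloser {
		return in // placeholder: no progress reporting in this sketch
	}

	context, dockerfileName, err := DetectContextFromRemoteURL(body, remoteURL, createProgressReader)
	if err != nil {
		return nil, "", err
	}

	// A non-empty dockerfileName means the remote was a single plain-text Dockerfile and the
	// returned name should override whatever -f/--file value the user supplied.
	return context, dockerfileName, nil
}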
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)

	repoName, tag = parsers.ParseRepositoryTag(repoName)
	if repoName != "" {
		if _, _, err := registry.ResolveRepositoryName(repoName); err != nil {
			return job.Error(err)
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return job.Error(err)
			}
		}
	}

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if utils.IsGIT(remoteURL) {
		if !utils.ValidGitTransport(remoteURL) {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}
		c, err := archive.Generate("Dockerfile", string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
	}

	id, err := builder.Run(context)
	if err != nil {
		return job.Error(err)
	}

	if repoName != "" {
		b.Daemon.Repositories().Set(repoName, tag, id, false)
	}
	return engine.StatusOK
}
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")

	cmd.Require(flag.Exact, 1)
	cmd.ParseFlags(args, true)

	var (
		context  archive.Archive
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil
	if cmd.Arg(0) == "-" {
		// As a special case, 'docker build -' will build from either an empty context with the
		// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
		buf := bufio.NewReader(cli.in)
		magic, err := buf.Peek(tarHeaderSize)
		if err != nil && err != io.EOF {
			return fmt.Errorf("failed to peek context header from STDIN: %v", err)
		}
		if !archive.IsArchive(magic) {
			dockerfile, err := ioutil.ReadAll(buf)
			if err != nil {
				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
			}

			// -f option has no meaning when we're reading it from stdin,
			// so just use our default Dockerfile name
			*dockerfileName = api.DefaultDockerfileName
			context, err = archive.Generate(*dockerfileName, string(dockerfile))
		} else {
			context = ioutil.NopCloser(buf)
		}
	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
		isRemote = true
	} else {
		root := cmd.Arg(0)
		if urlutil.IsGitURL(root) {
			root, err = utils.GitClone(root)
			if err != nil {
				return err
			}
			defer os.RemoveAll(root)
		}
		if _, err := os.Stat(root); err != nil {
			return err
		}

		absRoot, err := filepath.Abs(root)
		if err != nil {
			return err
		}

		filename := *dockerfileName // path to Dockerfile

		if *dockerfileName == "" {
			// No -f/--file was specified so use the default
			*dockerfileName = api.DefaultDockerfileName
			filename = filepath.Join(absRoot, *dockerfileName)

			// Just to be nice ;-) look for 'dockerfile' too but only
			// use it if we found it, otherwise ignore this check
			if _, err = os.Lstat(filename); os.IsNotExist(err) {
				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
				if _, err = os.Lstat(tmpFN); err == nil {
					*dockerfileName = strings.ToLower(*dockerfileName)
					filename = tmpFN
				}
			}
		}

		origDockerfile := *dockerfileName // used for error msg
		if filename, err = filepath.Abs(filename); err != nil {
			return err
		}

		// Verify that 'filename' is within the build context
		filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
		if err != nil {
			return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
		}

		// Now reset the dockerfileName to be relative to the build context
		*dockerfileName, err = filepath.Rel(absRoot, filename)
		if err != nil {
			return err
		}
		// And canonicalize dockerfile name to a platform-independent one
		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
		if err != nil {
			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", *dockerfileName, err)
		}

		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
		}
		var includes = []string{"."}

		excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore"))
		if err != nil {
			return err
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile.
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(*dockerfileName, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", *dockerfileName)
		}

		if err := utils.ValidateContextDirectory(root, excludes); err != nil {
			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
		}
		options := &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		}
		context, err = archive.TarWithOptions(root, options)
		if err != nil {
			return err
		}
	}

	// windows: show error message about modified file permissions
	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
	if runtime.GOOS == "windows" {
		logrus.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter(false)
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       cli.out,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	// Send the build context
	v := &url.Values{}

	// Check if the given image name can be resolved
	if *tag != "" {
		repository, tag := parsers.ParseRepositoryTag(*tag)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	v.Set("t", *tag)

	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}
	if *forceRm {
		v.Set("forcerm", "1")
	}
	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", *dockerfileName)

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))

	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}

	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}

	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}
	return err
}
// Build is the main interface of the package, it gathers the Builder
// struct and calls builder.Run() to do all the real build job.
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)
	sf := streamformatter.NewJSONStreamFormatter()

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}

		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
		}
		defer f.Body.Close()
		ct := f.Header.Get("Content-Type")
		clen := int(f.ContentLength)
		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)

		defer bodyReader.Close()

		if err != nil {
			return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
		}
		if contentType == httputils.MimeTypes.TextPlain {
			dockerFile, err := ioutil.ReadAll(bodyReader)
			if err != nil {
				return err
			}

			// When we're downloading just a Dockerfile put it in
			// the default name - don't allow the client to move/specify it
			buildConfig.DockerfileName = api.DefaultDockerfileName

			c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
			if err != nil {
				return err
			}
			context = c
		} else {
			// Pass through - this is a pre-packaged context, presumably
			// with a Dockerfile with the right name inside it.
			prCfg := progressreader.Config{
				In:        bodyReader,
				Out:       buildConfig.Stdout,
				Formatter: sf,
				Size:      clen,
				NewLines:  true,
				ID:        "Downloading context",
				Action:    buildConfig.RemoteURL,
			}
			context = progressreader.New(prCfg)
		}
	}

	defer context.Close()

	builder := &builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfigs:     buildConfig.AuthConfigs,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CPUShares,
		cpuPeriod:       buildConfig.CPUPeriod,
		cpuQuota:        buildConfig.CPUQuota,
		cpuSetCpus:      buildConfig.CPUSetCpus,
		cpuSetMems:      buildConfig.CPUSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
		id:              stringid.GenerateRandomID(),
	}

	defer func() {
		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
	}()

	id, err := builder.Run(context)
	if err != nil {
		return err
	}
	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
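The Build function above delegates content-type detection to inspectResponse, which is not shown here. As a rough, hypothetical sketch of the general idea only (the actual implementation may differ, for example in how it normalizes the detected MIME type before comparing it against httputils.MimeTypes.TextPlain): prefer the declared Content-Type, otherwise sniff the first bytes of the body and re-join them with the remainder so the full stream can still be read.

// inspectResponseSketch is a hypothetical, simplified stand-in for inspectResponse and is not
// the original function. It keeps the same shape as the call site above.
// (assumes "bytes", "io", "io/ioutil", and "net/http" are imported)
func inspectResponseSketch(ct string, body io.ReadCloser, clen int) (string, io.ReadCloser, error) {
	if ct != "" {
		// Trust the Content-Type the server declared.
		return ct, body, nil
	}

	// Sniff at most 512 bytes (or the whole body if it is known to be smaller).
	sniffLen := 512
	if clen > 0 && clen < sniffLen {
		sniffLen = clen
	}
	buf := make([]byte, sniffLen)
	n, err := io.ReadFull(body, buf)
	if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
		return "", body, err
	}

	sniffed := http.DetectContentType(buf[:n])

	// Re-join the sniffed bytes with the rest of the body so nothing is lost downstream.
	rejoined := ioutil.NopCloser(io.MultiReader(bytes.NewReader(buf[:n]), body))
	return sniffed, rejoined, nil
}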
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}

		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		buildConfig.DockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
		if err != nil {
			return err
		}
		context = c
	}

	defer context.Close()

	sf := streamformatter.NewJSONStreamFormatter()

	builder := &Builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfig:      buildConfig.AuthConfig,
		ConfigFile:      buildConfig.ConfigFile,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CpuShares,
		cpuPeriod:       buildConfig.CpuPeriod,
		cpuQuota:        buildConfig.CpuQuota,
		cpuSetCpus:      buildConfig.CpuSetCpus,
		cpuSetMems:      buildConfig.CpuSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return err
	}
	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
func TestMakeRemoteContext(t *testing.T) {
	contextDir, err := ioutil.TempDir("", "builder-remote-test")
	if err != nil {
		t.Fatalf("Error with creating temporary directory: %s", err)
	}
	defer os.RemoveAll(contextDir)

	testFilename := filepath.Join(contextDir, DefaultDockerfileName)
	err = ioutil.WriteFile(testFilename, []byte(textPlainDockerfile), 0777)
	if err != nil {
		t.Fatalf("Error when writing file (%s) contents: %s", testFilename, err)
	}

	mux := http.NewServeMux()
	server := httptest.NewServer(mux)
	serverURL, _ := url.Parse(server.URL)

	serverURL.Path = "/" + DefaultDockerfileName
	remoteURL := serverURL.String()

	mux.Handle("/", http.FileServer(http.Dir(contextDir)))

	remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
		httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
			dockerfile, err := ioutil.ReadAll(rc)
			if err != nil {
				return nil, err
			}
			return archive.Generate(DefaultDockerfileName, string(dockerfile))
		},
	})

	if err != nil {
		t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err)
	}

	if remoteContext == nil {
		t.Fatalf("Remote context should not be nil")
	}

	tarSumCtx, ok := remoteContext.(*tarSumContext)

	if !ok {
		t.Fatalf("Cast error, remote context should be casted to tarSumContext")
	}

	fileInfoSums := tarSumCtx.sums

	if fileInfoSums.Len() != 1 {
		t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len())
	}

	fileInfo := fileInfoSums.GetFile(DefaultDockerfileName)

	if fileInfo == nil {
		t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName)
	}

	if fileInfo.Pos() != 0 {
		t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos())
	}
}
func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		dockerfileName = job.Getenv("dockerfile")
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		pull           = job.GetenvBool("pull")
		memory         = job.GetenvInt64("memory")
		memorySwap     = job.GetenvInt64("memswap")
		cpuShares      = job.GetenvInt64("cpushares")
		cpuSetCpus     = job.Getenv("cpusetcpus")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)

	repoName, tag = parsers.ParseRepositoryTag(repoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return job.Error(err)
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return job.Error(err)
			}
		}
	}

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if urlutil.IsGitURL(remoteURL) {
		if !urlutil.IsGitTransport(remoteURL) {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if urlutil.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		dockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(dockerfileName, string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		Pull:            pull,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
		dockerfileName:  dockerfileName,
		cpuShares:       cpuShares,
		cpuSetCpus:      cpuSetCpus,
		memory:          memory,
		memorySwap:      memorySwap,
		cancelled:       job.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return job.Error(err)
	}

	if repoName != "" {
		b.Daemon.Repositories().Set(repoName, tag, id, true)
	}
	return engine.StatusOK
}