// Import imports an image, getting the archived layer data either from
// inConfig (if src is "-"), or from a URI specified in src. Progress output is
// written to outStream. Repository and tag names can optionally be given in
// the repo and tag arguments, respectively.
func (s *TagStore) Import(src string, repo string, tag string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
	var (
		sf      = streamformatter.NewJSONStreamFormatter()
		archive io.ReadCloser
		resp    *http.Response
	)

	if src == "-" {
		archive = inConfig
	} else {
		inConfig.Close()
		u, err := url.Parse(src)
		if err != nil {
			return err
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		outStream.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = httputils.Download(u.String())
		if err != nil {
			return err
		}
		progressReader := progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       outStream,
			Formatter: sf,
			Size:      resp.ContentLength,
			NewLines:  true,
			ID:        "",
			Action:    "Importing",
		})
		archive = progressReader
	}

	defer archive.Close()
	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, containerConfig)
	if err != nil {
		return err
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := s.Tag(repo, tag, img.ID, true); err != nil {
			return err
		}
	}
	outStream.Write(sf.FormatStatus("", img.ID))
	logID := img.ID
	if tag != "" {
		logID = utils.ImageReference(logID, tag)
	}
	s.eventsService.Log("import", logID, "")
	return nil
}
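// Aside (not from the original source): a minimal sketch isolating the src
// normalization step shared by the import variants in this section. A bare
// host such as "example.com/layer.tar" parses with an empty Scheme, so the
// import code rewrites it as an http URL whose Host is the whole src string,
// making u.String() yield "http://example.com/layer.tar".
func normalizeImportSrc(src string) (*url.URL, error) {
	u, err := url.Parse(src)
	if err != nil {
		return nil, err
	}
	if u.Scheme == "" {
		// Default to plain http, treating the entire src as the host part.
		u.Scheme = "http"
		u.Host = src
		u.Path = ""
	}
	return u, nil
}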
// getContextFromURL uses a remote URL as context for a `docker build`. The
// remote resource is downloaded as either a Dockerfile or a tar archive.
// Returns the tar archive used for the context and a path of the
// dockerfile inside the tar.
func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
	response, err := httputils.Download(remoteURL)
	if err != nil {
		return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
	}
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true)

	// Pass the response body through a progress reader.
	progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))

	return getContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
}
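// Aside (an illustrative sketch, not the docker implementation): the
// ioutils.NewReadCloserWrapper call above exists because the progress reader
// wraps response.Body without owning it. A wrapper of roughly this shape pairs
// a reader with an explicit close function, so closing the returned context
// stream also releases the underlying HTTP response:
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

// Close invokes the wrapped close function (here, response.Body.Close).
func (r *readCloserWrapper) Close() error { return r.closer() }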
// getContextFromURL uses a remote URL as context for a `docker build`. The
// remote resource is downloaded as either a Dockerfile or a context tar
// archive and stored in a temporary directory used as the context directory.
// Returns the absolute path to the temporary context directory, the relative
// path of the dockerfile in that context directory, and a nil error on
// success.
func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
	response, err := httputils.Download(remoteURL)
	if err != nil {
		return "", "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
	}
	defer response.Body.Close()

	// Pass the response body through a progress reader.
	progReader := &progressreader.Config{
		In:        response.Body,
		Out:       out,
		Formatter: streamformatter.NewStreamFormatter(),
		Size:      response.ContentLength,
		NewLines:  true,
		ID:        "",
		Action:    fmt.Sprintf("Downloading build context from remote url: %s", remoteURL),
	}

	return getContextFromReader(progReader, dockerfileName)
}
// MakeRemoteContext downloads a context from remoteURL and returns it.
//
// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of
// maxPreambleLength bytes from the body to help detecting the MIME type.
// Look at acceptableRemoteMIME for more details.
//
// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected
// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not).
// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned.
func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) {
	f, err := httputils.Download(remoteURL)
	if err != nil {
		return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err)
	}
	defer f.Body.Close()

	var contextReader io.ReadCloser
	if contentTypeHandlers != nil {
		contentType := f.Header.Get("Content-Type")
		clen := f.ContentLength

		contentType, contextReader, err = inspectResponse(contentType, f.Body, clen)
		if err != nil {
			return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err)
		}
		defer contextReader.Close()

		// This loop tries to find a content-type handler for the detected content-type.
		// If it could not find one from the caller-supplied map, it tries the empty content-type `""`
		// which is interpreted as a fallback handler (usually used for raw tar contexts).
		for _, ct := range []string{contentType, ""} {
			if fn, ok := contentTypeHandlers[ct]; ok {
				defer contextReader.Close()
				if contextReader, err = fn(contextReader); err != nil {
					return nil, err
				}
				break
			}
		}
	}

	// Pass through - this is a pre-packaged context, presumably
	// with a Dockerfile with the right name inside it.
	return MakeTarSumContext(contextReader)
}
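// Usage sketch (hypothetical caller, not part of the original source): wiring
// contentTypeHandlers so that a text/plain body is treated as a bare
// Dockerfile and wrapped into a generated tar, while the empty key "" acts as
// the raw-tar fallback described above. The handler bodies are assumptions for
// illustration; archive.Generate and api.DefaultDockerfileName are used the
// same way as in the Build variants later in this section.
func remoteContextExample(remoteURL string) (ModifiableContext, error) {
	return MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
		// A plain-text body is assumed to be the Dockerfile itself, so wrap
		// it into a single-file tar stream under the default name.
		"text/plain": func(rc io.ReadCloser) (io.ReadCloser, error) {
			dockerfile, err := ioutil.ReadAll(rc)
			if err != nil {
				return nil, err
			}
			return archive.Generate(api.DefaultDockerfileName, string(dockerfile))
		},
		// Fallback: pass the (assumed) tar stream through untouched.
		"": func(rc io.ReadCloser) (io.ReadCloser, error) {
			return rc, nil
		},
	})
}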
// ImportImage imports an image, getting the archived layer data either from
// inConfig (if src is "-"), or from a URI specified in src. Progress output is
// written to outStream. Repository and tag names can optionally be given in
// the repository and tag arguments, respectively.
func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
	var (
		sf     = streamformatter.NewJSONStreamFormatter()
		rc     io.ReadCloser
		resp   *http.Response
		newRef reference.Named
	)

	if repository != "" {
		var err error
		newRef, err = reference.ParseNamed(repository)
		if err != nil {
			return err
		}

		if _, isCanonical := newRef.(reference.Canonical); isCanonical {
			return errors.New("cannot import digest reference")
		}

		if tag != "" {
			newRef, err = reference.WithTag(newRef, tag)
			if err != nil {
				return err
			}
		}
	}

	config, err := dockerfile.BuildFromConfig(&container.Config{}, changes)
	if err != nil {
		return err
	}

	if src == "-" {
		rc = inConfig
	} else {
		inConfig.Close()
		u, err := url.Parse(src)
		if err != nil {
			return err
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		outStream.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = httputils.Download(u.String())
		if err != nil {
			return err
		}
		progressOutput := sf.NewProgressOutput(outStream, true)
		rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
	}

	defer rc.Close()
	if len(msg) == 0 {
		msg = "Imported from " + src
	}

	inflatedLayerData, err := archive.DecompressStream(rc)
	if err != nil {
		return err
	}
	// TODO: support windows baselayer?
	l, err := daemon.layerStore.Register(inflatedLayerData, "")
	if err != nil {
		return err
	}
	defer layer.ReleaseAndLog(daemon.layerStore, l)

	created := time.Now().UTC()
	imgConfig, err := json.Marshal(&image.Image{
		V1Image: image.V1Image{
			DockerVersion: dockerversion.Version,
			Config:        config,
			Architecture:  runtime.GOARCH,
			OS:            runtime.GOOS,
			Created:       created,
			Comment:       msg,
		},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{l.DiffID()},
		},
		History: []image.History{{
			Created: created,
			Comment: msg,
		}},
	})
	if err != nil {
		return err
	}

	id, err := daemon.imageStore.Create(imgConfig)
	if err != nil {
		return err
	}

	// FIXME: connect with commit code and call refstore directly
	if newRef != nil {
		if err := daemon.TagImageWithReference(id, newRef); err != nil {
			return err
		}
	}

	daemon.LogImageEvent(id.String(), id.String(), "import")
	outStream.Write(sf.FormatStatus("", id.String()))
	return nil
}
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)
	// ignoring error because the file was already opened successfully
	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
// Build is the main interface of the package: it gathers the Builder
// struct and calls builder.Run() to do all the real build work.
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)
	sf := streamformatter.NewJSONStreamFormatter()

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
		}
		defer f.Body.Close()
		ct := f.Header.Get("Content-Type")
		clen := int(f.ContentLength)
		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)

		defer bodyReader.Close()

		if err != nil {
			return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
		}
		if contentType == httputils.MimeTypes.TextPlain {
			dockerFile, err := ioutil.ReadAll(bodyReader)
			if err != nil {
				return err
			}

			// When we're downloading just a Dockerfile put it in
			// the default name - don't allow the client to move/specify it
			buildConfig.DockerfileName = api.DefaultDockerfileName

			c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
			if err != nil {
				return err
			}
			context = c
		} else {
			// Pass through - this is a pre-packaged context, presumably
			// with a Dockerfile with the right name inside it.
			prCfg := progressreader.Config{
				In:        bodyReader,
				Out:       buildConfig.Stdout,
				Formatter: sf,
				Size:      clen,
				NewLines:  true,
				ID:        "Downloading context",
				Action:    buildConfig.RemoteURL,
			}
			context = progressreader.New(prCfg)
		}
	}
	defer context.Close()

	builder := &builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfigs:     buildConfig.AuthConfigs,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CPUShares,
		cpuPeriod:       buildConfig.CPUPeriod,
		cpuQuota:        buildConfig.CPUQuota,
		cpuSetCpus:      buildConfig.CPUSetCpus,
		cpuSetMems:      buildConfig.CPUSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
		id:              stringid.GenerateRandomID(),
	}

	defer func() {
		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
	}()

	id, err := builder.Run(context)
	if err != nil {
		return err
	}
	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		buildConfig.DockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
		if err != nil {
			return err
		}
		context = c
	}
	defer context.Close()

	sf := streamformatter.NewJSONStreamFormatter()

	builder := &Builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfig:      buildConfig.AuthConfig,
		ConfigFile:      buildConfig.ConfigFile,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CpuShares,
		cpuPeriod:       buildConfig.CpuPeriod,
		cpuQuota:        buildConfig.CpuQuota,
		cpuSetCpus:      buildConfig.CpuSetCpus,
		cpuSetMems:      buildConfig.CpuSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return err
	}

	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
// ImportImage imports an image, getting the archived layer data either from
// inConfig (if src is "-"), or from a URI specified in src. Progress output is
// written to outStream. An optional repository/tag reference to apply to the
// imported image can be given in the newRef argument.
func (daemon *Daemon) ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *runconfig.Config) error {
	var (
		sf      = streamformatter.NewJSONStreamFormatter()
		archive io.ReadCloser
		resp    *http.Response
	)

	if src == "-" {
		archive = inConfig
	} else {
		inConfig.Close()
		u, err := url.Parse(src)
		if err != nil {
			return err
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		outStream.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = httputils.Download(u.String())
		if err != nil {
			return err
		}
		progressOutput := sf.NewProgressOutput(outStream, true)
		archive = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing")
	}

	defer archive.Close()
	if len(msg) == 0 {
		msg = "Imported from " + src
	}

	// TODO: support windows baselayer?
	l, err := daemon.layerStore.Register(archive, "")
	if err != nil {
		return err
	}
	defer layer.ReleaseAndLog(daemon.layerStore, l)

	created := time.Now().UTC()
	imgConfig, err := json.Marshal(&image.Image{
		V1Image: image.V1Image{
			DockerVersion: dockerversion.Version,
			Config:        config,
			Architecture:  runtime.GOARCH,
			OS:            runtime.GOOS,
			Created:       created,
			Comment:       msg,
		},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{l.DiffID()},
		},
		History: []image.History{{
			Created: created,
			Comment: msg,
		}},
	})
	if err != nil {
		return err
	}

	id, err := daemon.imageStore.Create(imgConfig)
	if err != nil {
		return err
	}

	// FIXME: connect with commit code and call refstore directly
	if newRef != nil {
		if err := daemon.TagImage(newRef, id.String()); err != nil {
			return err
		}
	}

	outStream.Write(sf.FormatStatus("", id.String()))
	daemon.EventsService.Log("import", id.String(), "")
	return nil
}
func (b *BuilderJob) CmdBuild(job *engine.Job) error {
	if len(job.Args) != 0 {
		return fmt.Errorf("Usage: %s\n", job.Name)
	}
	var (
		dockerfileName = job.Getenv("dockerfile")
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		pull           = job.GetenvBool("pull")
		memory         = job.GetenvInt64("memory")
		memorySwap     = job.GetenvInt64("memswap")
		cpuShares      = job.GetenvInt64("cpushares")
		cpuSetCpus     = job.Getenv("cpusetcpus")
		cpuSetMems     = job.Getenv("cpusetmems")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)

	repoName, tag = parsers.ParseRepositoryTag(repoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if urlutil.IsGitURL(remoteURL) {
		if !urlutil.IsGitTransport(remoteURL) {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(remoteURL) {
		f, err := httputils.Download(remoteURL)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		dockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(dockerfileName, string(dockerFile))
		if err != nil {
			return err
		}
		context = c
	}
	defer context.Close()

	sf := streamformatter.NewStreamFormatter(job.GetenvBool("json"))

	builder := &Builder{
		Daemon: b.Daemon,
		Engine: b.Engine,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !suppressOutput,
		UtilizeCache:    !noCache,
		Remove:          rm,
		ForceRemove:     forceRm,
		Pull:            pull,
		OutOld:          job.Stdout,
		StreamFormatter: sf,
		AuthConfig:      authConfig,
		AuthConfigFile:  configFile,
		dockerfileName:  dockerfileName,
		cpuShares:       cpuShares,
		cpuSetCpus:      cpuSetCpus,
		cpuSetMems:      cpuSetMems,
		memory:          memory,
		memorySwap:      memorySwap,
		cancelled:       job.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return err
	}

	if repoName != "" {
		b.Daemon.Repositories().Set(repoName, tag, id, true)
	}
	return nil
}
func (s *TagStore) CmdImport(job *engine.Job) error {
	if n := len(job.Args); n != 2 && n != 3 {
		return fmt.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src          = job.Args[0]
		repo         = job.Args[1]
		tag          string
		sf           = streamformatter.NewStreamFormatter(job.GetenvBool("json"))
		archive      archive.ArchiveReader
		resp         *http.Response
		stdoutBuffer = bytes.NewBuffer(nil)
		newConfig    runconfig.Config
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return err
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = httputils.Download(u.String())
		if err != nil {
			return err
		}
		progressReader := progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       job.Stdout,
			Formatter: sf,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Importing",
		})
		defer progressReader.Close()
		archive = progressReader
	}

	buildConfigJob := job.Eng.Job("build_config")
	buildConfigJob.Stdout.Add(stdoutBuffer)
	buildConfigJob.Setenv("changes", job.Getenv("changes"))
	// FIXME this should be removed when we remove the deprecated config param
	buildConfigJob.Setenv("config", job.Getenv("config"))

	if err := buildConfigJob.Run(); err != nil {
		return err
	}
	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
		return err
	}

	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, &newConfig)
	if err != nil {
		return err
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := s.Set(repo, tag, img.ID, true); err != nil {
			return err
		}
	}
	job.Stdout.Write(sf.FormatStatus("", img.ID))
	logID := img.ID
	if tag != "" {
		logID = utils.ImageReference(logID, tag)
	}
	s.eventsService.Log("import", logID, "")
	return nil
}