// DownloaderForSource determines what SCM plugin should be used for downloading
// the sources from the repository.
//
// s is the raw source location (local path or clone URL, optionally carrying a
// #ref fragment); forceCopy forces the file-copy downloader for existing local
// content even when git could handle it. It returns the chosen downloader, the
// possibly rewritten source location, and an error when the input is malformed
// or no downloader applies.
func DownloaderForSource(s string, forceCopy bool) (build.Downloader, string, error) {
	glog.V(4).Infof("DownloadForSource %s", s)
	details, mods := git.ParseFile(s)
	glog.V(4).Infof("return from ParseFile file exists %v proto specified %v use copy %v", details.FileExists, details.ProtoSpecified, details.UseCopy)
	// An existing local path with a malformed #ref fragment is rejected
	// outright rather than silently ignored.
	if details.FileExists && details.BadRef {
		return nil, s, fmt.Errorf("local location referenced by %s exists but the input after the # is malformed", s)
	}
	// ParseFile may hand back a modified path (mods); adopt it. The exact
	// semantics of mods.Path are defined in the git package.
	if details.FileExists && mods != nil {
		glog.V(4).Infof("new source from parse file %s", mods.Path)
		if details.ProtoSpecified {
			s = mods.Path
		} else {
			// prepending with file:// is a precautionary step which previous incarnations of this code did; we
			// preserve that behavior (it is more explicit, if not absolutely necessary; but we do it here as was done before
			// vs. down in our generic git layer (which is leveraged separately in origin)
			s = "file://" + mods.Path
		}
	}
	// Plain local content — or an explicit forceCopy override — is handled by
	// the file-copy downloader instead of git.
	if details.FileExists && (details.UseCopy || forceCopy) {
		return &file.File{util.NewFileSystem()}, s, nil
	}
	// If the source is valid GIT protocol (file://, ssh://, git://, git@, etc..) use GIT
	// binary to download the sources
	g := git.New()
	if g.ValidCloneSpec(s) {
		return &git.Clone{g, util.NewFileSystem()}, s, nil
	}
	return nil, s, fmt.Errorf("no downloader defined for location: %q", s)
}
// DownloaderForSource determines what SCM plugin should be used for downloading
// the sources from the repository.
//
// It returns the selected downloader, the possibly rewritten source location,
// and an error when the location is malformed or unsupported.
func DownloaderForSource(s string) (build.Downloader, string, error) {
	glog.V(4).Infof("DownloadForSource %s", s)
	details, mods := git.ParseFile(s)
	glog.V(4).Infof("return from ParseFile file exists %v proto specified %v use copy %v", details.FileExists, details.ProtoSpecified, details.UseCopy)
	// An existing local path with a malformed #ref fragment is an error.
	if details.FileExists && details.BadRef {
		return nil, s, fmt.Errorf("local location referenced by %s exists but the input after the # is malformed", s)
	}
	// ParseFile may hand back a modified path (mods); adopt it. The exact
	// semantics of mods.Path are defined in the git package.
	if details.FileExists && mods != nil {
		glog.V(4).Infof("new path from parse file %s", mods.Path)
		s = mods.Path
	}
	// Local content that should not be cloned is copied verbatim.
	if details.FileExists && details.UseCopy {
		return &file.File{util.NewFileSystem()}, s, nil
	}
	// If the source is valid GIT protocol (file://, ssh://, git://, git@, etc..) use GIT
	// binary to download the sources
	g := git.New()
	if g.ValidCloneSpec(s) {
		return &git.Clone{g, util.NewFileSystem()}, s, nil
	}
	return nil, s, fmt.Errorf("no downloader defined for location: %q", s)
}
// DownloaderForSource determines what SCM plugin should be used for downloading
// the sources from the repository.
//
// It returns the downloader, the source location (normalized to carry an
// explicit file:// scheme for local paths), and an error when the location
// does not exist or is unsupported.
func DownloaderForSource(s string) (build.Downloader, string, error) {
	details, _ := git.ParseFile(s)
	// Existing local content that should be copied rather than cloned.
	if details.FileExists && details.UseCopy {
		if !details.ProtoSpecified {
			// since not using git, any resulting URLs need to be explicit with file:// protocol specified
			s = "file://" + s
		}
		return &file.File{util.NewFileSystem()}, s, nil
	}
	// A protocol (presumably file://) was given but nothing exists locally.
	if details.ProtoSpecified && !details.FileExists {
		return nil, s, fmt.Errorf("local location: %s does not exist", s)
	}
	if !details.ProtoSpecified && details.FileExists {
		// if local file system, without file://, when using git, should not need file://, but we'll be safe;
		// satisfies previous constructed test case in scm_test.go as well
		s = "file://" + s
	}
	// If the source is valid GIT remote protocol (ssh://, git://, git@, etc..) use GIT
	// binary to download the sources
	g := git.New()
	if g.ValidCloneSpec(s) {
		return &git.Clone{g, util.NewFileSystem()}, s, nil
	}
	return nil, s, fmt.Errorf("no downloader defined for location: %q", s)
}
// DownloaderForSource determines what SCM plugin should be used for downloading // the sources from the repository. func DownloaderForSource(s string) build.Downloader { // If the source starts with file:// and there is no GIT binary, use 'file' // SCM plugin if (strings.HasPrefix(s, "file://") || strings.HasPrefix(s, "/")) && !hasGitBinary() { return &file.File{util.NewFileSystem()} } g := git.New() if g.ValidCloneSpec(s) { return &git.Clone{g, util.NewFileSystem()} } glog.Errorf("No downloader defined for %q source URL", s) return nil }
// StreamFileAsTar streams the source file as a tar archive. // The permissions of the file is changed to 0666. func (t *stiTar) StreamDirAsTar(source, dest string, writer io.Writer) error { f, err := os.Open(source) if err != nil { return err } if info, _ := f.Stat(); !info.IsDir() { return fmt.Errorf("the source %q has to be directory, not a file", source) } defer f.Close() fs := util.NewFileSystem() tmpDir, err := ioutil.TempDir("", "s2i-") if err != nil { return err } defer os.RemoveAll(tmpDir) if err := fs.Copy(source, tmpDir); err != nil { return err } // Skip chmod if on windows OS if runtime.GOOS != "windows" { err = filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return os.Chmod(path, 0777) } return os.Chmod(path, 0666) }) if err != nil { return err } } return t.CreateTarStream(tmpDir, false, writer) }
// StreamDirAsTarWithCallback streams the source directory as a tar archive. func (t *stiTar) StreamDirAsTarWithCallback(source string, writer io.Writer, walkFn filepath.WalkFunc, modifyInplace bool) error { f, err := os.Open(source) if err != nil { return err } defer f.Close() info, err := f.Stat() if err != nil { return err } if !info.IsDir() { return fmt.Errorf("the source %q has to be directory, not a file", source) } destDir := source if !modifyInplace { fs := util.NewFileSystem() tmpDir, err := ioutil.TempDir("", "s2i-") if err != nil { return err } defer os.RemoveAll(tmpDir) if err = fs.Copy(source, tmpDir); err != nil { return err } destDir = tmpDir } if err := filepath.Walk(destDir, walkFn); err != nil { return err } return t.CreateTarStream(destDir, false, writer) }
// StreamFileAsTarWithCallback streams the source file as a tar archive. func (t *stiTar) StreamFileAsTarWithCallback(source, name string, writer io.Writer, walkFn filepath.WalkFunc, modifyInplace bool) error { f, err := os.Open(source) if err != nil { return err } defer f.Close() info, err := f.Stat() if err != nil { return err } if info.IsDir() { return fmt.Errorf("the source %q has to be regular file, not directory", source) } fs := util.NewFileSystem() tmpDir, err := ioutil.TempDir("", "s2i-") if err != nil { return err } defer os.RemoveAll(tmpDir) dst := filepath.Join(tmpDir, name) if err := fs.Copy(source, dst); err != nil { return err } fileInfo, fileErr := os.Stat(dst) if err := walkFn(dst, fileInfo, fileErr); err != nil { return err } return t.CreateTarStream(tmpDir, false, writer) }
// New returns the instance of STI builder strategy for the given config.
// If the layeredBuilder parameter is specified, then the builder provided will
// be used for the case that the base Docker image does not have 'tar' or 'bash'
// installed.
func New(req *api.Config) (*STI, error) {
	// NOTE(review): the local variable "docker" shadows the docker package
	// from here on — works, but easy to misread.
	docker, err := docker.New(req.DockerConfig, req.PullAuthentication)
	if err != nil {
		return nil, err
	}
	inst := scripts.NewInstaller(req.BuilderImage, req.ScriptsURL, docker, req.PullAuthentication)
	b := &STI{
		installer:        inst,
		config:           req,
		docker:           docker,
		git:              git.New(),
		fs:               util.NewFileSystem(),
		tar:              tar.New(),
		callbackInvoker:  util.NewCallbackInvoker(),
		requiredScripts:  []string{api.Assemble, api.Run},
		optionalScripts:  []string{api.SaveArtifacts},
		externalScripts:  map[string]bool{},
		installedScripts: map[string]bool{},
		scriptsURL:       map[string]string{},
	}
	// The sources are downloaded using the GIT downloader.
	// TODO: Add more SCM in future.
	b.source = &git.Clone{b.git, b.fs}
	b.garbage = &build.DefaultCleaner{b.fs, b.docker}
	b.layered, err = layered.New(req, b)
	// Set interfaces
	// The STI value implements each build phase itself; wire them to b.
	b.preparer = b
	b.artifacts = b
	b.scripts = b
	b.postExecutor = b
	// Any error from layered.New above is returned together with the
	// otherwise fully wired builder.
	return b, err
}
// NewInstaller returns a new instance of the default Installer implementation func NewInstaller(image string, scriptsURL string, proxyConfig *api.ProxyConfig, docker docker.Docker, auth dockerClient.AuthConfiguration) Installer { m := DefaultScriptSourceManager{ Image: image, ScriptsURL: scriptsURL, dockerAuth: auth, docker: docker, fs: util.NewFileSystem(), download: NewDownloader(proxyConfig), } // Order is important here, first we try to get the scripts from provided URL, // then we look into sources and check for .s2i/bin scripts. if len(m.ScriptsURL) > 0 { m.Add(&URLScriptHandler{URL: m.ScriptsURL, download: m.download, fs: m.fs, name: ScriptURLHandler}) } m.Add(&SourceScriptHandler{fs: m.fs}) // If the detection handlers above fail, try to get the script url from the // docker image itself. defaultURL, err := m.docker.GetScriptsURL(m.Image) if err == nil && defaultURL != "" { m.Add(&URLScriptHandler{URL: defaultURL, download: m.download, fs: m.fs, name: ImageURLHandler}) } return &m }
// New returns a new instance of OnBuild builder func New(config *api.Config, overrides build.Overrides) (*OnBuild, error) { dockerHandler, err := docker.New(config.DockerConfig, config.PullAuthentication) if err != nil { return nil, err } b := &OnBuild{ docker: dockerHandler, git: git.New(), fs: util.NewFileSystem(), tar: tar.New(), } // Use STI Prepare() and download the 'run' script optionally. s, err := sti.New(config, overrides) s.SetScripts([]string{}, []string{api.Assemble, api.Run}) downloader := overrides.Downloader if downloader == nil { d, sourceURL, err := scm.DownloaderForSource(config.Source) if err != nil { return nil, err } downloader = d config.Source = sourceURL } b.source = onBuildSourceHandler{ Downloader: downloader, Preparer: s, Ignorer: &ignore.DockerIgnorer{}, } b.garbage = &build.DefaultCleaner{b.fs, b.docker} return b, nil }
// copyNetworkPodInfo copies the network diagnostic log directory for the node
// running the given pod into the local d.LogDir: the remote directory is
// tarred into a local temporary file, which is then extracted.
func (d *NetworkDiagnostic) copyNetworkPodInfo(pod *kapi.Pod) error {
	tmp, err := ioutil.TempFile("", "network-diags")
	if err != nil {
		return fmt.Errorf("Can not create local temporary file for tar: %v", err)
	}
	defer os.Remove(tmp.Name())
	// Tar logdir on the remote node and copy to a local temporary file
	errBuf := &bytes.Buffer{}
	nodeLogDir := filepath.Join(util.NetworkDiagDefaultLogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)
	cmd := []string{"chroot", util.NetworkDiagContainerMountPath, "tar", "-C", nodeLogDir, "-c", "."}
	if err = util.Execute(d.Factory, cmd, pod, nil, tmp, errBuf); err != nil {
		return fmt.Errorf("Creating remote tar locally failed: %v, %s", err, errBuf.String())
	}
	if err := tmp.Close(); err != nil {
		return fmt.Errorf("Closing temporary tar file %s failed: %v", tmp.Name(), err)
	}
	// Extract copied temporary file locally
	// (the file was closed above, so it is reopened fresh for reading here)
	tmp, err = os.Open(tmp.Name())
	if err != nil {
		return fmt.Errorf("Can not open temporary tar file %s: %v", tmp.Name(), err)
	}
	defer tmp.Close()
	tarHelper := tar.New(s2iutil.NewFileSystem())
	// No exclusions: extract everything that was tarred on the node.
	tarHelper.SetExclusionPattern(nil)
	logdir := filepath.Join(d.LogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)
	err = tarHelper.ExtractTarStream(logdir, tmp)
	if err != nil {
		// NOTE(review): errBuf holds stderr from the earlier remote tar, not
		// from this local extraction — the message may be misleading.
		return fmt.Errorf("Untar local directory failed: %v, %s", err, errBuf.String())
	}
	return nil
}
// StreamFileAsTar streams the source file as a tar archive. // The permissions of all files in archive is changed to 0666. func (t *stiTar) StreamFileAsTar(source, name string, writer io.Writer) error { f, err := os.Open(source) if err != nil { return err } if info, _ := f.Stat(); info.IsDir() { return fmt.Errorf("the source %q has to be regular file, not directory", source) } defer f.Close() fs := util.NewFileSystem() tmpDir, err := ioutil.TempDir("", "s2i-") if err != nil { return err } defer os.RemoveAll(tmpDir) dst := filepath.Join(tmpDir, name) if err := fs.Copy(source, dst); err != nil { return err } if runtime.GOOS != "windows" { if err := os.Chmod(dst, 0666); err != nil { return err } } return t.CreateTarStream(tmpDir, false, writer) }
// New returns the instance of STI builder strategy for the given config.
// If the layeredBuilder parameter is specified, then the builder provided will
// be used for the case that the base Docker image does not have 'tar' or 'bash'
// installed.
func New(req *api.Config, overrides build.Overrides) (*STI, error) {
	docker, err := dockerpkg.New(req.DockerConfig, req.PullAuthentication)
	if err != nil {
		return nil, err
	}
	var incrementalDocker dockerpkg.Docker
	// Incremental builds may authenticate differently, so they get their own
	// docker client.
	if req.Incremental {
		incrementalDocker, err = dockerpkg.New(req.DockerConfig, req.IncrementalAuthentication)
		if err != nil {
			return nil, err
		}
	}
	inst := scripts.NewInstaller(req.BuilderImage, req.ScriptsURL, docker, req.PullAuthentication)
	b := &STI{
		installer:         inst,
		config:            req,
		docker:            docker,
		incrementalDocker: incrementalDocker,
		git:               git.New(),
		fs:                util.NewFileSystem(),
		tar:               tar.New(),
		callbackInvoker:   util.NewCallbackInvoker(),
		requiredScripts:   []string{api.Assemble, api.Run},
		optionalScripts:   []string{api.SaveArtifacts},
		externalScripts:   map[string]bool{},
		installedScripts:  map[string]bool{},
		scriptsURL:        map[string]string{},
	}
	// The sources are downloaded using the GIT downloader.
	// TODO: Add more SCM in future.
	// TODO: explicit decision made to customize processing for usage specifically vs.
	// leveraging overrides; also, we ultimately want to simplify s2i usage a good bit,
	// which would lead to replacing this quick short circuit (so this change is tactical)
	b.source = overrides.Downloader
	if b.source == nil && !req.Usage {
		downloader, sourceURL, err := scm.DownloaderForSource(req.Source, req.ForceCopy)
		if err != nil {
			return nil, err
		}
		b.source = downloader
		req.Source = sourceURL
	}
	b.garbage = &build.DefaultCleaner{b.fs, b.docker}
	b.layered, err = layered.New(req, b, overrides)
	// Set interfaces
	b.preparer = b
	// later on, if we support say .gitignore func in addition to .dockerignore func, setting
	// ignorer will be based on config setting
	b.ignorer = &ignore.DockerIgnorer{}
	b.artifacts = b
	b.scripts = b
	b.postExecutor = b
	// Any error from layered.New above is returned together with the
	// otherwise fully wired builder.
	return b, err
}
// DownloadDirFromContainer downloads an entire directory of files from a remote // container. func DownloadDirFromContainer(client *docker.Client, container, src, dst string) error { downloader := newContainerDownloader(client, container, src) defer downloader.Close() tarReader := &removeLeadingDirectoryAdapter{Reader: tar.NewReader(downloader)} t := s2itar.New(s2iutil.NewFileSystem()) return t.ExtractTarStreamFromTarReader(dst, tarReader, nil) }
// New returns the instance of STI builder strategy for the given config.
// If the layeredBuilder parameter is specified, then the builder provided will
// be used for the case that the base Docker image does not have 'tar' or 'bash'
// installed.
func New(req *api.Config, overrides build.Overrides) (*STI, error) {
	docker, err := dockerpkg.New(req.DockerConfig, req.PullAuthentication)
	if err != nil {
		return nil, err
	}
	var incrementalDocker dockerpkg.Docker
	// Incremental builds may authenticate differently, so they get their own
	// docker client.
	if req.Incremental {
		incrementalDocker, err = dockerpkg.New(req.DockerConfig, req.IncrementalAuthentication)
		if err != nil {
			return nil, err
		}
	}
	inst := scripts.NewInstaller(req.BuilderImage, req.ScriptsURL, docker, req.PullAuthentication)
	b := &STI{
		installer:         inst,
		config:            req,
		docker:            docker,
		incrementalDocker: incrementalDocker,
		git:               git.New(),
		fs:                util.NewFileSystem(),
		tar:               tar.New(),
		callbackInvoker:   util.NewCallbackInvoker(),
		requiredScripts:   []string{api.Assemble, api.Run},
		optionalScripts:   []string{api.SaveArtifacts},
		externalScripts:   map[string]bool{},
		installedScripts:  map[string]bool{},
		scriptsURL:        map[string]string{},
	}
	// The sources are downloaded using the GIT downloader.
	// TODO: Add more SCM in future.
	// An explicit override takes precedence over source-based detection.
	b.source = overrides.Downloader
	if b.source == nil {
		downloader, sourceURL, err := scm.DownloaderForSource(req.Source)
		if err != nil {
			return nil, err
		}
		b.source = downloader
		req.Source = sourceURL
	}
	b.garbage = &build.DefaultCleaner{b.fs, b.docker}
	b.layered, err = layered.New(req, b, overrides)
	// Set interfaces
	b.preparer = b
	// later on, if we support say .gitignore func in addition to .dockerignore func, setting
	// ignorer will be based on config setting
	b.ignorer = &ignore.DockerIgnorer{}
	b.artifacts = b
	b.scripts = b
	b.postExecutor = b
	// Any error from layered.New above is returned together with the
	// otherwise fully wired builder.
	return b, err
}
// NewInstaller returns a new instance of the default Installer implementation func NewInstaller(image string, scriptsURL string, docker docker.Docker, auth dockerClient.AuthConfiguration) Installer { return &installer{ image: image, scriptsURL: scriptsURL, docker: docker, downloader: NewDownloader(), pullAuth: auth, fs: util.NewFileSystem(), } }
// NewDockerBuilder creates a new instance of DockerBuilder func NewDockerBuilder(dockerClient DockerClient, buildsClient client.BuildInterface, build *api.Build, gitClient GitClient, cgLimits *s2iapi.CGroupLimits) *DockerBuilder { return &DockerBuilder{ dockerClient: dockerClient, build: build, gitClient: gitClient, tar: tar.New(s2iutil.NewFileSystem()), client: buildsClient, cgLimits: cgLimits, } }
// NewUsage creates a new instance of the default Usage implementation func NewUsage(config *api.Config) (*Usage, error) { b, err := New(config, util.NewFileSystem(), build.Overrides{}) if err != nil { return nil, err } usage := Usage{ handler: b, config: config, garbage: b.garbage, } return &usage, nil }
func New(config *api.Config, scripts build.ScriptsHandler, overrides build.Overrides) (*Layered, error) { d, err := docker.New(config.DockerConfig, config.PullAuthentication) if err != nil { return nil, err } return &Layered{ docker: d, config: config, fs: util.NewFileSystem(), tar: tar.New(), scripts: scripts, }, nil }
// DownloaderForSource determines what SCM plugin should be used for downloading
// the sources from the repository.
//
// Local paths are normalized (file:// handling, relative → absolute) and may
// be re-dispatched recursively once recognized as git repositories.
func DownloaderForSource(s string) (build.Downloader, string, error) {
	// If the source is using file:// protocol but it is not a GIT repository,
	// trim the prefix and treat it as a file copy.
	if strings.HasPrefix(s, "file://") && !isLocalGitRepository(s) {
		s = strings.TrimPrefix(s, "file://")
	}
	// If the source is file:// protocol and it is GIT repository, but we don't
	// have GIT binary to fetch it, treat it as file copy.
	if strings.HasPrefix(s, "file://") && !hasGitBinary() {
		s = strings.TrimPrefix(s, "file://")
	}
	// If the source is valid GIT protocol (file://, git://, git@, etc..) use GIT
	// binary to download the sources
	if g := git.New(); g.ValidCloneSpec(s) {
		return &git.Clone{g, util.NewFileSystem()}, s, nil
	}
	// Convert relative path to absolute path.
	if !strings.HasPrefix(s, "/") {
		if absolutePath, err := filepath.Abs(s); err == nil {
			s = absolutePath
		}
	}
	// A bare local path that is a git repository gets the file:// prefix and
	// goes through the dispatch again.
	// NOTE(review): if no git binary is installed, the recursive call trims
	// the prefix back off and this branch can be reached again — verify this
	// cannot recurse indefinitely for a local git repo without git installed.
	if isLocalGitRepository(s) {
		return DownloaderForSource("file://" + s)
	}
	// If we have local directory and that directory exists, use file copy
	if _, err := os.Stat(s); err == nil {
		return &file.File{util.NewFileSystem()}, s, nil
	}
	return nil, s, fmt.Errorf("No downloader defined for location: %q", s)
}
// New creates a Layered builder.
//
// The tar handler is configured to exclude paths matching
// config.ExcludeRegExp.
func New(config *api.Config, scripts build.ScriptsHandler, overrides build.Overrides) (*Layered, error) {
	d, err := docker.New(config.DockerConfig, config.PullAuthentication)
	if err != nil {
		return nil, err
	}
	tarHandler := tar.New()
	// NOTE(review): MustCompile panics on an invalid ExcludeRegExp, and an
	// empty pattern compiles to a regexp matching every string — confirm
	// callers always supply a sane value.
	tarHandler.SetExclusionPattern(regexp.MustCompile(config.ExcludeRegExp))
	return &Layered{
		docker:  d,
		config:  config,
		fs:      util.NewFileSystem(),
		tar:     tarHandler,
		scripts: scripts,
	}, nil
}
// IsRemoteRepository checks whether the provided string is a remote repository or not func IsRemoteRepository(s string) bool { if !s2igit.New(s2iutil.NewFileSystem()).ValidCloneSpecRemoteOnly(s) { return false } url, err := url.Parse(s) if err != nil { return false } url.Fragment = "" gitRepo := git.NewRepository() if _, _, err := gitRepo.ListRemote(url.String()); err != nil { return false } return true }
// ParseRepository parses a string that may be in the Git format (git@) or URL format
// and extracts the appropriate value. Any fragment on the URL is preserved.
//
// Protocols returned:
// - http, https
// - file
// - git
// - ssh
//
// It returns an error when url.Parse rejects the input or when the
// schema-fixup step below fails.
func ParseRepository(s string) (*url.URL, error) {
	uri, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	// There are some shortcomings with url.Parse when it comes to GIT, namely wrt
	// the GIT local/file and ssh protocols - it does not handle implied schema (i.e. no <proto>:// prefix)well;
	// We handle those caveats here
	err = s2igit.New(s2iutil.NewFileSystem()).MungeNoProtocolURL(s, uri)
	if err != nil {
		return nil, err
	}
	return uri, nil
}
// UploadFileToContainer uploads a file to a remote container.
// The file src is streamed as a tar whose entry is renamed from
// filepath.Base(src) to filepath.Base(dest) and is uploaded into the dest
// parent directory inside the container.
func UploadFileToContainer(client *docker.Client, container, src, dest string) error {
	uploader, errch := newContainerUploader(client, container, filepath.Dir(dest))
	t := s2itar.New(s2iutil.NewFileSystem())
	tarWriter := s2itar.RenameAdapter{Writer: tar.NewWriter(uploader), Old: filepath.Base(src), New: filepath.Base(dest)}
	err := t.CreateTarStreamToTarWriter(src, true, tarWriter, nil)
	if err == nil {
		err = tarWriter.Close()
	}
	// Close the uploader unconditionally — presumably this unblocks whatever
	// feeds errch — before blocking on the channel below; keep this ordering.
	uploader.Close()
	if err != nil {
		return err
	}
	return <-errch
}
// Strategy creates the appropriate build strategy for the provided config, using // the overrides provided. Not all strategies support all overrides. func Strategy(config *api.Config, overrides build.Overrides) (build.Builder, api.BuildInfo, error) { var builder build.Builder var buildInfo api.BuildInfo fs := util.NewFileSystem() image, err := docker.GetBuilderImage(config) if err != nil { buildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonPullBuilderImageFailed, utilstatus.ReasonMessagePullBuilderImageFailed, ) return nil, buildInfo, err } config.HasOnBuild = image.OnBuild // if we're blocking onbuild, just do a normal s2i build flow // which won't do a docker build and invoke the onbuild commands if image.OnBuild && !config.BlockOnBuild { builder, err = onbuild.New(config, fs, overrides) if err != nil { buildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed, ) return nil, buildInfo, err } return builder, buildInfo, nil } builder, err = sti.New(config, fs, overrides) if err != nil { buildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed, ) return nil, buildInfo, err } return builder, buildInfo, err }
// New returns a new instance of OnBuild builder func New(config *api.Config) (*OnBuild, error) { dockerHandler, err := docker.New(config.DockerConfig, config.PullAuthentication) if err != nil { return nil, err } b := &OnBuild{ docker: dockerHandler, git: git.New(), fs: util.NewFileSystem(), tar: tar.New(), } // Use STI Prepare() and download the 'run' script optionally. s, err := sti.New(config) s.SetScripts([]string{}, []string{api.Assemble, api.Run}) b.source = onBuildSourceHandler{ &git.Clone{b.git, b.fs}, s, } b.garbage = &build.DefaultCleaner{b.fs, b.docker} return b, nil }
func streamPathToBuild(repo git.Repository, in io.Reader, out io.Writer, client osclient.BuildConfigInterface, fromDir, fromFile, fromRepo string, options *buildapi.BinaryBuildRequestOptions) (*buildapi.Build, error) { asDir, asFile, asRepo := len(fromDir) > 0, len(fromFile) > 0, len(fromRepo) > 0 if asRepo && !git.IsGitInstalled() { return nil, fmt.Errorf("cannot find git. Git is required to start a build from a repository. If git is not available, use --from-dir instead.") } var fromPath string switch { case asDir: fromPath = fromDir case asFile: fromPath = fromFile case asRepo: fromPath = fromRepo } var r io.Reader switch { case fromFile == "-": return nil, fmt.Errorf("--from-file=- is not supported") case fromDir == "-": r = in fmt.Fprintf(out, "Uploading archive file from STDIN as binary input for the build ...\n") case (asFile || asDir) && (strings.HasPrefix(fromPath, "http://") || strings.HasPrefix(fromPath, "https://")): resp, err := http.Get(fromPath) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != 200 { return nil, fmt.Errorf("unable to download file %q: %s", fromPath, resp.Status) } r = resp.Body if asFile { options.AsFile = httpFileName(resp) if options.AsFile == "" { return nil, fmt.Errorf("unable to determine filename from HTTP headers or URL") } fmt.Fprintf(out, "Uploading file from %q as binary input for the build ...\n", fromPath) } else { fmt.Fprintf(out, "Uploading archive from %q as binary input for the build ...\n", fromPath) } default: clean := filepath.Clean(fromPath) path, err := filepath.Abs(fromPath) if err != nil { return nil, err } stat, err := os.Stat(path) if err != nil { return nil, err } if stat.IsDir() { commit := "HEAD" if len(options.Commit) > 0 { commit = options.Commit } info, gitErr := gitRefInfo(repo, path, commit) if gitErr == nil { options.Commit = info.GitSourceRevision.Commit options.Message = info.GitSourceRevision.Message options.AuthorName = info.GitSourceRevision.Author.Name 
options.AuthorEmail = info.GitSourceRevision.Author.Email options.CommitterName = info.GitSourceRevision.Committer.Name options.CommitterEmail = info.GitSourceRevision.Committer.Email } else { glog.V(6).Infof("Unable to read Git info from %q: %v", clean, gitErr) } // NOTE: It's important that this stays false unless we change the // path to something else, otherwise we will delete whatever path the // user provided. var usedTempDir bool = false var tempDirectory string = "" if asRepo { var contextDir string = "" fmt.Fprintf(out, "Uploading %q at commit %q as binary input for the build ...\n", clean, commit) if gitErr != nil { return nil, fmt.Errorf("the directory %q is not a valid Git repository: %v", clean, gitErr) } // If the user doesn't give us the root directory of the Git repo, // we still want the command to work. However, as this may be // unintended, we warn them. if gitRootDir, err := repo.GetRootDir(path); filepath.Clean(gitRootDir) != filepath.Clean(path) && err == nil { fmt.Fprintf(out, "WARNING: Using root dir %s for Git repository\n", gitRootDir) contextDir, _ = filepath.Rel(gitRootDir, path) path = gitRootDir } // Create a temp directory to move the repo contents to tempDirectory, err := ioutil.TempDir(os.TempDir(), "oc_cloning_"+options.Commit) if err != nil { return nil, err } // We only want to grab the contents of the specified commit, with // submodules included cloneOptions := []string{"--recursive"} if verbose := glog.V(3); !verbose { cloneOptions = append(cloneOptions, "--quiet") } // Clone the repository to a temp directory for future tar-ing if err := repo.CloneWithOptions(tempDirectory, path, cloneOptions...); err != nil { return nil, err } if err := repo.Checkout(tempDirectory, commit); err != nil { return nil, err } // We'll continue to use tar on the temp directory path = filepath.Join(tempDirectory, contextDir) usedTempDir = true } else { fmt.Fprintf(out, "Uploading directory %q as binary input for the build ...\n", clean) } pr, pw := 
io.Pipe() go func() { w := gzip.NewWriter(pw) if err := tar.New(s2iutil.NewFileSystem()).CreateTarStream(path, false, w); err != nil { pw.CloseWithError(err) } else { w.Close() pw.CloseWithError(io.EOF) } if usedTempDir { os.RemoveAll(tempDirectory) } }() r = pr } else { f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() r = f if asFile { options.AsFile = filepath.Base(path) fmt.Fprintf(out, "Uploading file %q as binary input for the build ...\n", clean) } else { fmt.Fprintf(out, "Uploading archive file %q as binary input for the build ...\n", clean) } } } if !asFile { br := bufio.NewReaderSize(r, 4096) r = br if !isArchive(br) { fmt.Fprintf(out, "WARNING: the provided file may not be an archive (tar, tar.gz, or zip), use --from-file to prevent extraction\n") } } return client.InstantiateBinary(options, r) }
// New returns the instance of STI builder strategy for the given config.
// If the layeredBuilder parameter is specified, then the builder provided will
// be used for the case that the base Docker image does not have 'tar' or 'bash'
// installed.
func New(config *api.Config, overrides build.Overrides) (*STI, error) {
	docker, err := dockerpkg.New(config.DockerConfig, config.PullAuthentication)
	if err != nil {
		return nil, err
	}
	var incrementalDocker dockerpkg.Docker
	// Incremental builds may authenticate differently, so they get their own
	// docker client.
	if config.Incremental {
		incrementalDocker, err = dockerpkg.New(config.DockerConfig, config.IncrementalAuthentication)
		if err != nil {
			return nil, err
		}
	}
	inst := scripts.NewInstaller(config.BuilderImage, config.ScriptsURL, config.ScriptDownloadProxyConfig, docker, config.PullAuthentication)
	tarHandler := tar.New()
	// NOTE(review): MustCompile panics on an invalid ExcludeRegExp — confirm
	// callers validate it first.
	tarHandler.SetExclusionPattern(regexp.MustCompile(config.ExcludeRegExp))
	builder := &STI{
		installer:              inst,
		config:                 config,
		docker:                 docker,
		incrementalDocker:      incrementalDocker,
		git:                    git.New(),
		fs:                     util.NewFileSystem(),
		tar:                    tarHandler,
		callbackInvoker:        util.NewCallbackInvoker(),
		requiredScripts:        []string{api.Assemble, api.Run},
		optionalScripts:        []string{api.SaveArtifacts},
		optionalRuntimeScripts: []string{api.AssembleRuntime},
		externalScripts:        map[string]bool{},
		installedScripts:       map[string]bool{},
		scriptsURL:             map[string]string{},
	}
	// A runtime image gets its own installer and docker client.
	if len(config.RuntimeImage) > 0 {
		builder.runtimeInstaller = scripts.NewInstaller(config.RuntimeImage, config.ScriptsURL, config.ScriptDownloadProxyConfig, docker, config.PullAuthentication)
		builder.runtimeDocker, err = dockerpkg.New(config.DockerConfig, config.RuntimeAuthentication)
		if err != nil {
			return builder, err
		}
	}
	// The sources are downloaded using the Git downloader.
	// TODO: Add more SCM in future.
	// TODO: explicit decision made to customize processing for usage specifically vs.
	// leveraging overrides; also, we ultimately want to simplify s2i usage a good bit,
	// which would lead to replacing this quick short circuit (so this change is tactical)
	builder.source = overrides.Downloader
	if builder.source == nil && !config.Usage {
		downloader, sourceURL, err := scm.DownloaderForSource(config.Source, config.ForceCopy)
		if err != nil {
			return nil, err
		}
		builder.source = downloader
		config.Source = sourceURL
	}
	builder.garbage = build.NewDefaultCleaner(builder.fs, builder.docker)
	builder.layered, err = layered.New(config, builder, overrides)
	// Set interfaces
	builder.preparer = builder
	// later on, if we support say .gitignore func in addition to .dockerignore func, setting
	// ignorer will be based on config setting
	builder.ignorer = &ignore.DockerIgnorer{}
	builder.artifacts = builder
	builder.scripts = builder
	builder.postExecutor = builder
	builder.initPostExecutorSteps()
	// Any error from layered.New above is returned together with the
	// otherwise fully wired builder.
	return builder, err
}
// TestDockerfilePath validates that we can use a Dockerfile with a custom name, and in a sub-directory func TestDockerfilePath(t *testing.T) { tests := []struct { contextDir string dockerfilePath string dockerStrategy *api.DockerBuildStrategy }{ // default Dockerfile path { dockerfilePath: "Dockerfile", dockerStrategy: &api.DockerBuildStrategy{}, }, // custom Dockerfile path in the root context { dockerfilePath: "mydockerfile", dockerStrategy: &api.DockerBuildStrategy{ DockerfilePath: "mydockerfile", }, }, // custom Dockerfile path in a sub directory { dockerfilePath: "dockerfiles/mydockerfile", dockerStrategy: &api.DockerBuildStrategy{ DockerfilePath: "dockerfiles/mydockerfile", }, }, // custom Dockerfile path in a sub directory // with a contextDir { contextDir: "somedir", dockerfilePath: "dockerfiles/mydockerfile", dockerStrategy: &api.DockerBuildStrategy{ DockerfilePath: "dockerfiles/mydockerfile", }, }, } for _, test := range tests { buildDir, err := ioutil.TempDir(util.GetBaseDir(), "dockerfile-path") if err != nil { t.Errorf("failed to create tmpdir: %v", err) continue } absoluteDockerfilePath := filepath.Join(buildDir, test.contextDir, test.dockerfilePath) dockerfileContent := "FROM openshift/origin-base" if err = os.MkdirAll(filepath.Dir(absoluteDockerfilePath), os.FileMode(0750)); err != nil { t.Errorf("failed to create directory %s: %v", filepath.Dir(absoluteDockerfilePath), err) continue } if err = ioutil.WriteFile(absoluteDockerfilePath, []byte(dockerfileContent), os.FileMode(0644)); err != nil { t.Errorf("failed to write dockerfile to %s: %v", absoluteDockerfilePath, err) continue } build := &api.Build{ Spec: api.BuildSpec{ CommonSpec: api.CommonSpec{ Source: api.BuildSource{ Git: &api.GitBuildSource{ URI: "http://github.com/openshift/origin.git", }, ContextDir: test.contextDir, }, Strategy: api.BuildStrategy{ DockerStrategy: test.dockerStrategy, }, Output: api.BuildOutput{ To: &kapi.ObjectReference{ Kind: "DockerImage", Name: 
"test/test-result:latest", }, }, }, }, } dockerClient := &FakeDocker{ buildImageFunc: func(opts docker.BuildImageOptions) error { if opts.Dockerfile != test.dockerfilePath { t.Errorf("Unexpected dockerfile path: %s (expected: %s)", opts.Dockerfile, test.dockerfilePath) } return nil }, } dockerBuilder := &DockerBuilder{ dockerClient: dockerClient, build: build, gitClient: git.NewRepository(), tar: tar.New(s2iutil.NewFileSystem()), } // this will validate that the Dockerfile is readable // and append some labels to the Dockerfile if err = dockerBuilder.addBuildParameters(buildDir); err != nil { t.Errorf("failed to add build parameters: %v", err) continue } // check that our Dockerfile has been modified dockerfileData, err := ioutil.ReadFile(absoluteDockerfilePath) if err != nil { t.Errorf("failed to read dockerfile %s: %v", absoluteDockerfilePath, err) continue } if !strings.Contains(string(dockerfileData), dockerfileContent) { t.Errorf("Updated Dockerfile content does not contains the original Dockerfile content.\n\nOriginal content:\n%s\n\nUpdated content:\n%s\n", dockerfileContent, string(dockerfileData)) continue } // check that the docker client is called with the right Dockerfile parameter if err = dockerBuilder.dockerBuild(buildDir, "", []api.SecretBuildSource{}); err != nil { t.Errorf("failed to build: %v", err) continue } os.RemoveAll(buildDir) } }
func baseTest(t *testing.T, patterns []string, filesToDel []string, filesToKeep []string) { // create working dir workingDir, werr := util.NewFileSystem().CreateWorkingDirectory() if werr != nil { t.Errorf("problem allocating working dir %v \n", werr) } else { t.Logf("working directory is %s \n", workingDir) } defer func() { // clean up test cleanerr := os.RemoveAll(workingDir) if cleanerr != nil { t.Errorf("problem cleaning up %v \n", cleanerr) } }() c := &api.Config{WorkingDir: workingDir} // create source repo dir for .s2iignore that matches where ignore.go looks dpath := filepath.Join(c.WorkingDir, "upload", "src") derr := os.MkdirAll(dpath, 0777) if derr != nil { t.Errorf("Problem creating source repo dir %s with %v \n", dpath, derr) } c.WorkingSourceDir = dpath t.Logf("working source dir %s \n", dpath) // create s2iignore file ipath := filepath.Join(dpath, api.IgnoreFile) ifile, ierr := os.Create(ipath) defer ifile.Close() if ierr != nil { t.Errorf("Problem creating .s2iignore at %s with %v \n", ipath, ierr) } // write patterns to remove into s2ignore, but save ! 
exclusions filesToIgnore := make(map[string]string) for _, pattern := range patterns { t.Logf("storing pattern %s \n", pattern) _, serr := ifile.WriteString(pattern) if serr != nil { t.Errorf("Problem setting .s2iignore %v \n", serr) } if strings.HasPrefix(pattern, "!") { pattern = strings.Replace(pattern, "!", "", 1) t.Logf("Noting ignore pattern %s \n", pattern) filesToIgnore[pattern] = pattern } } // create slices the store files to create, maps for files which should be deleted, files which should be kept filesToCreate := make([]string, 0) filesToDelCheck := make(map[string]string) for _, fileToDel := range filesToDel { filesToDelCheck[fileToDel] = fileToDel filesToCreate = append(filesToCreate, fileToDel) } filesToKeepCheck := make(map[string]string) for _, fileToKeep := range filesToKeep { filesToKeepCheck[fileToKeep] = fileToKeep filesToCreate = append(filesToCreate, fileToKeep) } // create files for test for _, fileToCreate := range filesToCreate { fbpath := filepath.Join(dpath, fileToCreate) // ensure any subdirs off working dir exist dirpath := filepath.Dir(fbpath) derr := os.MkdirAll(dirpath, 0777) if derr != nil && !os.IsExist(derr) { t.Errorf("Problem creating subdirs %s with %v \n", dirpath, derr) } t.Logf("Going to create file %s given supplied suffix %s \n", fbpath, fileToCreate) fbfile, fberr := os.Create(fbpath) defer fbfile.Close() if fberr != nil { t.Errorf("Problem creating test file %v \n", fberr) } } // run ignorer algorithm ignorer := &DockerIgnorer{} ignorer.Ignore(c) // check if filesToDel, minus ignores, are gone, and filesToKeep are still there for _, fileToCheck := range filesToCreate { fbpath := filepath.Join(dpath, fileToCheck) t.Logf("Evaluating file %s from dir %s and file to check %s \n", fbpath, dpath, fileToCheck) // see if file still exists or not ofile, oerr := os.Open(fbpath) defer ofile.Close() var fileExists bool if oerr == nil { fileExists = true t.Logf("The file %s exists after Ignore was run \n", fbpath) } else { if 
os.IsNotExist(oerr) { t.Logf("The file %s does not exist after Ignore was run \n", fbpath) fileExists = false } else { t.Errorf("Could not verify existence of %s because of %v \n", fbpath, oerr) } } _, iok := filesToIgnore[fileToCheck] _, kok := filesToKeepCheck[fileToCheck] _, dok := filesToDelCheck[fileToCheck] // if file present, verify it is in ignore or keep list, and not in del list if fileExists { if iok { t.Logf("validated ignored file is still present %s \n ", fileToCheck) continue } if kok { t.Logf("validated file to keep is still present %s \n", fileToCheck) continue } if dok { t.Errorf("file which was cited to be deleted by caller to runTest exists %s \n", fileToCheck) continue } // if here, something unexpected t.Errorf("file not in ignore / keep / del list !?!?!?!? %s \n", fileToCheck) } else { if dok { t.Logf("file which should have been deleted is in fact gone %s \n", fileToCheck) continue } if iok { t.Errorf("file put into ignore list does not exist %s \n ", fileToCheck) continue } if kok { t.Errorf("file passed in with keep list does not exist %s \n", fileToCheck) continue } // if here, then something unexpected happened t.Errorf("file not in ignore / keep / del list !?!?!?!? %s \n", fileToCheck) } } }