// UploadToContainerWithCallback uploads artifacts to the container.
// If the source is a directory, then all files and sub-folders are copied into
// the destination (which has to be a directory as well).
// If the source is a single file, then the file is copied into the destination
// (which has to be the full path to a file inside the container).
// If the destination path is empty or set to ".", then we will try to figure
// out the WORKDIR of the image that the container was created from and use that
// as the destination. If the WORKDIR is not set, then we copy files into the "/"
// folder (the docker upload default).
func (d *stiDocker) UploadToContainerWithCallback(src, dest, container string, walkFn filepath.WalkFunc, modifyInplace bool) error {
	path := filepath.Dir(dest)
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	t := tar.New()
	r, w := io.Pipe()
	if info.IsDir() {
		path = dest
		go func() {
			defer w.Close()
			if err := t.StreamDirAsTarWithCallback(src, w, walkFn, modifyInplace); err != nil {
				glog.V(0).Infof("error: Uploading directory to container failed: %v", err)
			}
		}()
	} else {
		go func() {
			defer w.Close()
			if err := t.StreamFileAsTarWithCallback(src, filepath.Base(dest), w, walkFn, modifyInplace); err != nil {
				glog.V(0).Infof("error: Uploading files to container failed: %v", err)
			}
		}()
	}
	glog.V(3).Infof("Uploading %q to %q ...", src, path)
	opts := docker.UploadToContainerOptions{Path: path, InputStream: r}
	return d.client.UploadToContainer(container, opts)
}
// New returns the instance of STI builder strategy for the given config.
// If the layeredBuilder parameter is specified, then the builder provided will
// be used for the case that the base Docker image does not have 'tar' or 'bash'
// installed.
func New(req *api.Config) (*STI, error) {
	docker, err := docker.New(req.DockerConfig, req.PullAuthentication)
	if err != nil {
		return nil, err
	}
	inst := scripts.NewInstaller(req.BuilderImage, req.ScriptsURL, docker, req.PullAuthentication)

	b := &STI{
		installer:        inst,
		config:           req,
		docker:           docker,
		git:              git.New(),
		fs:               util.NewFileSystem(),
		tar:              tar.New(),
		callbackInvoker:  util.NewCallbackInvoker(),
		requiredScripts:  []string{api.Assemble, api.Run},
		optionalScripts:  []string{api.SaveArtifacts},
		externalScripts:  map[string]bool{},
		installedScripts: map[string]bool{},
		scriptsURL:       map[string]string{},
	}

	// The sources are downloaded using the GIT downloader.
	// TODO: Add more SCM in future.
	b.source = &git.Clone{b.git, b.fs}
	b.garbage = &build.DefaultCleaner{b.fs, b.docker}
	b.layered, err = layered.New(req, b)

	// Set interfaces
	b.preparer = b
	b.artifacts = b
	b.scripts = b
	b.postExecutor = b
	return b, err
}
// NewCmdRsync creates a new sync command
func NewCmdRsync(name, parent string, f *clientcmd.Factory, out, errOut io.Writer) *cobra.Command {
	tarHelper := tar.New()
	tarHelper.SetExclusionPattern(nil)

	o := RsyncOptions{
		Out:           out,
		ErrOut:        errOut,
		LocalExecutor: &defaultLocalExecutor{},
		Tar:           tarHelper,
	}
	cmd := &cobra.Command{
		Use:     fmt.Sprintf("%s SOURCE_DIR POD:DESTINATION_DIR", name),
		Short:   "Copy local files to a pod",
		Long:    rsyncLong,
		Example: fmt.Sprintf(rsyncExample, parent+" "+name),
		Run: func(c *cobra.Command, args []string) {
			kcmdutil.CheckErr(o.Complete(f, c, args))
			kcmdutil.CheckErr(o.Validate())
			kcmdutil.CheckErr(o.RunRsync())
		},
	}
	cmd.Flags().StringVarP(&o.ContainerName, "container", "c", "", "Container within the pod")
	cmd.Flags().BoolVarP(&o.Quiet, "quiet", "q", false, "Quiet copy")
	cmd.Flags().BoolVar(&o.Delete, "delete", false, "Delete files not present in source")
	cmd.Flags().BoolVar(&o.UseTar, "use-tar", false, "Use tar instead of rsync")
	return cmd
}
func (d *NetworkDiagnostic) copyNetworkPodInfo(pod *kapi.Pod) error {
	tmp, err := ioutil.TempFile("", "network-diags")
	if err != nil {
		return fmt.Errorf("Can not create local temporary file for tar: %v", err)
	}
	defer os.Remove(tmp.Name())

	// Tar logdir on the remote node and copy to a local temporary file
	errBuf := &bytes.Buffer{}
	nodeLogDir := filepath.Join(util.NetworkDiagDefaultLogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)
	cmd := []string{"chroot", util.NetworkDiagContainerMountPath, "tar", "-C", nodeLogDir, "-c", "."}
	if err = util.Execute(d.Factory, cmd, pod, nil, tmp, errBuf); err != nil {
		return fmt.Errorf("Creating remote tar locally failed: %v, %s", err, errBuf.String())
	}
	if err := tmp.Close(); err != nil {
		return fmt.Errorf("Closing temporary tar file %s failed: %v", tmp.Name(), err)
	}

	// Extract copied temporary file locally
	tmp, err = os.Open(tmp.Name())
	if err != nil {
		return fmt.Errorf("Can not open temporary tar file %s: %v", tmp.Name(), err)
	}
	defer tmp.Close()

	tarHelper := tar.New()
	tarHelper.SetExclusionPattern(nil)
	logdir := filepath.Join(d.LogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)
	err = tarHelper.ExtractTarStream(logdir, tmp)
	if err != nil {
		return fmt.Errorf("Untar local directory failed: %v, %s", err, errBuf.String())
	}
	return nil
}
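// The tar helper's ExtractTarStream above unpacks the downloaded archive into
// a local directory. As a rough, hedged illustration of the kind of work that
// call performs (not the helper's actual implementation), a minimal
// standard-library extraction loop could look like the sketch below; the
// function name, input file, and the handling of only directories and regular
// files are assumptions made for brevity.
package main

import (
	"archive/tar"
	"io"
	"os"
	"path/filepath"
)

// extractTarStream recreates the entries read from r underneath dir.
func extractTarStream(dir string, r io.Reader) error {
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		target := filepath.Join(dir, filepath.Clean(hdr.Name))
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, os.FileMode(hdr.Mode)); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
				return err
			}
			out, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(hdr.Mode))
			if err != nil {
				return err
			}
			if _, err := io.Copy(out, tr); err != nil {
				out.Close()
				return err
			}
			out.Close()
		}
	}
}

func main() {
	f, err := os.Open("network-diags.tar") // hypothetical archive produced by the remote tar command
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := extractTarStream("./logs", f); err != nil {
		panic(err)
	}
}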
// New returns a new instance of OnBuild builder
func New(config *api.Config, overrides build.Overrides) (*OnBuild, error) {
	dockerHandler, err := docker.New(config.DockerConfig, config.PullAuthentication)
	if err != nil {
		return nil, err
	}
	b := &OnBuild{
		docker: dockerHandler,
		git:    git.New(),
		fs:     util.NewFileSystem(),
		tar:    tar.New(),
	}
	// Use STI Prepare() and download the 'run' script optionally.
	s, err := sti.New(config, overrides)
	s.SetScripts([]string{}, []string{api.Assemble, api.Run})

	downloader := overrides.Downloader
	if downloader == nil {
		d, sourceURL, err := scm.DownloaderForSource(config.Source)
		if err != nil {
			return nil, err
		}
		downloader = d
		config.Source = sourceURL
	}

	b.source = onBuildSourceHandler{
		Downloader: downloader,
		Preparer:   s,
		Ignorer:    &ignore.DockerIgnorer{},
	}
	b.garbage = &build.DefaultCleaner{b.fs, b.docker}
	return b, nil
}
// UploadToContainerWithCallback uploads artifacts to the container.
// If the source is a directory, then all files and sub-folders are copied into
// the destination (which has to be a directory as well).
// If the source is a single file, then the file is copied into the destination
// (which has to be the full path to a file inside the container).
// If the destination path is empty or set to ".", then we will try to figure
// out the WORKDIR of the image that the container was created from and use that
// as the destination. If the WORKDIR is not set, then we copy files into the "/"
// folder (the docker upload default).
func (d *stiDocker) UploadToContainerWithCallback(src, dest, container string, walkFn filepath.WalkFunc, modifyInplace bool) error {
	path := filepath.Dir(dest)
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	t := tar.New()
	r, w := io.Pipe()
	if info.IsDir() {
		path = dest
		go func() {
			defer w.Close()
			if err := t.StreamDirAsTarWithCallback(src, w, walkFn, modifyInplace); err != nil {
				glog.V(0).Infof("error: Uploading directory to container failed: %v", err)
			}
		}()
	} else {
		go func() {
			defer w.Close()
			if err := t.StreamFileAsTarWithCallback(src, filepath.Base(dest), w, walkFn, modifyInplace); err != nil {
				glog.V(0).Infof("error: Uploading files to container failed: %v", err)
			}
		}()
	}
	glog.V(3).Infof("Uploading %q to %q ...", src, path)
	ctx, cancel := getDefaultContext(DefaultDockerTimeout)
	defer cancel()
	return d.client.CopyToContainer(ctx, container, path, r, dockertypes.CopyToContainerOptions{})
}
// UploadToContainer uploads artifacts to the container.
// If the source is a directory, then all files and sub-folders are copied into
// the destination (which has to be a directory as well).
// If the source is a single file, then the file is copied into the destination
// (which has to be the full path to a file inside the container).
// If the destination path is empty or set to ".", then we will try to figure
// out the WORKDIR of the image that the container was created from and use that
// as the destination. If the WORKDIR is not set, then we copy files into the "/"
// folder (the docker upload default).
func (d *stiDocker) UploadToContainer(src, dest, name string) error {
	path := filepath.Dir(dest)
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	t := tar.New()
	r, w := io.Pipe()
	if info.IsDir() {
		path = dest
		go func() {
			defer w.Close()
			if err := t.StreamDirAsTar(src, dest, w); err != nil {
				glog.Errorf("Uploading directory to container failed: %v", err)
			}
		}()
	} else {
		go func() {
			defer w.Close()
			if err := t.StreamFileAsTar(src, filepath.Base(dest), w); err != nil {
				glog.Errorf("Uploading files to container failed: %v", err)
			}
		}()
	}
	glog.V(3).Infof("Uploading %q to %q ...", src, path)
	opts := docker.UploadToContainerOptions{Path: path, InputStream: r}
	return d.client.UploadToContainer(name, opts)
}
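// All of the UploadToContainer variants above share one pattern: the tar
// archive is produced by a goroutine writing to one end of an io.Pipe while
// the Docker upload API consumes the other end, so nothing is staged on disk.
// A minimal, self-contained sketch of that pattern using only the standard
// library and go-dockerclient follows; the directory walk is a simplified
// stand-in for the s2i tar helper's StreamDirAsTar*, and the container ID and
// paths are placeholders.
package main

import (
	"archive/tar"
	"io"
	"os"
	"path/filepath"

	docker "github.com/fsouza/go-dockerclient"
)

// tarDirTo writes the contents of dir (regular files only) to w as a tar stream.
func tarDirTo(dir string, w io.Writer) error {
	tw := tar.NewWriter(w)
	defer tw.Close()
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil || !info.Mode().IsRegular() {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = rel
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	r, w := io.Pipe()
	go func() {
		// Close the write end with the walk error (or nil) so the reader terminates.
		w.CloseWithError(tarDirTo("./artifacts", w))
	}()
	err = client.UploadToContainer("my-container", docker.UploadToContainerOptions{
		Path:        "/tmp/artifacts",
		InputStream: r,
	})
	if err != nil {
		panic(err)
	}
}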
// New returns the instance of STI builder strategy for the given config. // If the layeredBuilder parameter is specified, then the builder provided will // be used for the case that the base Docker image does not have 'tar' or 'bash' // installed. func New(req *api.Config, overrides build.Overrides) (*STI, error) { docker, err := dockerpkg.New(req.DockerConfig, req.PullAuthentication) if err != nil { return nil, err } var incrementalDocker dockerpkg.Docker if req.Incremental { incrementalDocker, err = dockerpkg.New(req.DockerConfig, req.IncrementalAuthentication) if err != nil { return nil, err } } inst := scripts.NewInstaller(req.BuilderImage, req.ScriptsURL, docker, req.PullAuthentication) b := &STI{ installer: inst, config: req, docker: docker, incrementalDocker: incrementalDocker, git: git.New(), fs: util.NewFileSystem(), tar: tar.New(), callbackInvoker: util.NewCallbackInvoker(), requiredScripts: []string{api.Assemble, api.Run}, optionalScripts: []string{api.SaveArtifacts}, externalScripts: map[string]bool{}, installedScripts: map[string]bool{}, scriptsURL: map[string]string{}, } // The sources are downloaded using the GIT downloader. // TODO: Add more SCM in future. // TODO: explicit decision made to customize processing for usage specifically vs. // leveraging overrides; also, we ultimately want to simplify s2i usage a good bit, // which would lead to replacing this quick short circuit (so this change is tactical) b.source = overrides.Downloader if b.source == nil && !req.Usage { downloader, sourceURL, err := scm.DownloaderForSource(req.Source, req.ForceCopy) if err != nil { return nil, err } b.source = downloader req.Source = sourceURL } b.garbage = &build.DefaultCleaner{b.fs, b.docker} b.layered, err = layered.New(req, b, overrides) // Set interfaces b.preparer = b // later on, if we support say .gitignore func in addition to .dockerignore func, setting // ignorer will be based on config setting b.ignorer = &ignore.DockerIgnorer{} b.artifacts = b b.scripts = b b.postExecutor = b return b, err }
// DownloadDirFromContainer downloads an entire directory of files from a remote
// container.
func DownloadDirFromContainer(client *docker.Client, container, src, dst string) error {
	downloader := newContainerDownloader(client, container, src)
	defer downloader.Close()

	tarReader := &removeLeadingDirectoryAdapter{Reader: tar.NewReader(downloader)}

	t := stitar.New()
	return t.ExtractTarStreamFromTarReader(dst, tarReader, nil)
}
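// The removeLeadingDirectoryAdapter used above is not shown in this snippet.
// A plausible, hedged sketch of such an adapter is below: it wraps a
// *tar.Reader and strips the first path element from each header name, so an
// archive rooted at "src/..." extracts without the extra top-level directory.
// The type name, input file, and exact stripping rule are illustrative only.
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
	"strings"
)

// leadingDirStripper proxies Next() and rewrites header names.
type leadingDirStripper struct {
	*tar.Reader
}

func (l *leadingDirStripper) Next() (*tar.Header, error) {
	hdr, err := l.Reader.Next()
	if err != nil {
		return hdr, err
	}
	// Drop the first path component, e.g. "src/app/main.go" -> "app/main.go".
	if i := strings.IndexByte(hdr.Name, '/'); i >= 0 {
		hdr.Name = hdr.Name[i+1:]
	}
	return hdr, err
}

func main() {
	f, err := os.Open("download.tar") // hypothetical archive with a single top-level directory
	if err != nil {
		panic(err)
	}
	defer f.Close()
	tr := &leadingDirStripper{Reader: tar.NewReader(f)}
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}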
// NewDockerBuilder creates a new instance of DockerBuilder
func NewDockerBuilder(dockerClient DockerClient, build *api.Build) *DockerBuilder {
	return &DockerBuilder{
		dockerClient: dockerClient,
		build:        build,
		git:          git.New(),
		tar:          tar.New(),
		urlTimeout:   urlCheckTimeout,
	}
}
// New returns the instance of STI builder strategy for the given config. // If the layeredBuilder parameter is specified, then the builder provided will // be used for the case that the base Docker image does not have 'tar' or 'bash' // installed. func New(req *api.Config, overrides build.Overrides) (*STI, error) { docker, err := dockerpkg.New(req.DockerConfig, req.PullAuthentication) if err != nil { return nil, err } var incrementalDocker dockerpkg.Docker if req.Incremental { incrementalDocker, err = dockerpkg.New(req.DockerConfig, req.IncrementalAuthentication) if err != nil { return nil, err } } inst := scripts.NewInstaller(req.BuilderImage, req.ScriptsURL, docker, req.PullAuthentication) b := &STI{ installer: inst, config: req, docker: docker, incrementalDocker: incrementalDocker, git: git.New(), fs: util.NewFileSystem(), tar: tar.New(), callbackInvoker: util.NewCallbackInvoker(), requiredScripts: []string{api.Assemble, api.Run}, optionalScripts: []string{api.SaveArtifacts}, externalScripts: map[string]bool{}, installedScripts: map[string]bool{}, scriptsURL: map[string]string{}, } // The sources are downloaded using the GIT downloader. // TODO: Add more SCM in future. b.source = overrides.Downloader if b.source == nil { downloader, sourceURL, err := scm.DownloaderForSource(req.Source) if err != nil { return nil, err } b.source = downloader req.Source = sourceURL } b.garbage = &build.DefaultCleaner{b.fs, b.docker} b.layered, err = layered.New(req, b, overrides) // Set interfaces b.preparer = b // later on, if we support say .gitignore func in addition to .dockerignore func, setting // ignorer will be based on config setting b.ignorer = &ignore.DockerIgnorer{} b.artifacts = b b.scripts = b b.postExecutor = b return b, err }
// NewDockerBuilder creates a new instance of DockerBuilder
func NewDockerBuilder(dockerClient DockerClient, buildsClient client.BuildInterface, build *api.Build, gitClient GitClient) *DockerBuilder {
	return &DockerBuilder{
		dockerClient: dockerClient,
		build:        build,
		gitClient:    gitClient,
		tar:          tar.New(),
		urlTimeout:   urlCheckTimeout,
		client:       buildsClient,
	}
}
// NewDockerBuilder creates a new instance of DockerBuilder
func NewDockerBuilder(dockerClient DockerClient, buildsClient client.BuildInterface, build *api.Build, gitClient GitClient, cgLimits *s2iapi.CGroupLimits) *DockerBuilder {
	return &DockerBuilder{
		dockerClient: dockerClient,
		build:        build,
		gitClient:    gitClient,
		tar:          tar.New(s2iutil.NewFileSystem()),
		client:       buildsClient,
		cgLimits:     cgLimits,
	}
}
// NewDockerBuilder creates a new instance of DockerBuilder
func NewDockerBuilder(dockerClient DockerClient, buildsClient client.BuildInterface, build *api.Build, gitClient GitClient, cgLimits *s2iapi.CGroupLimits) *DockerBuilder {
	return &DockerBuilder{
		dockerClient: dockerClient,
		build:        build,
		gitClient:    gitClient,
		tar:          tar.New(),
		urlTimeout:   initialURLCheckTimeout,
		client:       buildsClient,
		cgLimits:     cgLimits,
	}
}
// NewDockerBuilder creates a new instance of DockerBuilder
func NewDockerBuilder(dockerClient DockerClient, authCfg docker.AuthConfiguration, authPresent bool, build *api.Build) *DockerBuilder {
	return &DockerBuilder{
		dockerClient: dockerClient,
		authPresent:  authPresent,
		auth:         authCfg,
		build:        build,
		git:          git.New(),
		tar:          tar.New(),
		urlTimeout:   urlCheckTimeout,
	}
}
func New(config *api.Config, scripts build.ScriptsHandler, overrides build.Overrides) (*Layered, error) {
	d, err := docker.New(config.DockerConfig, config.PullAuthentication)
	if err != nil {
		return nil, err
	}
	return &Layered{
		docker:  d,
		config:  config,
		fs:      util.NewFileSystem(),
		tar:     tar.New(),
		scripts: scripts,
	}, nil
}
// CopyFromHost copies a set of files from the Docker host to the local file system func (h *HostHelper) CopyFromHost(sourceDir, destDir string) error { container, err := h.runner(). Image(h.image). Bind(fmt.Sprintf("%[1]s:%[1]s:ro", sourceDir)). Create() if err != nil { return err } defer func() { errors.LogError(h.client.RemoveContainer(docker.RemoveContainerOptions{ID: container})) }() localTarFile, err := ioutil.TempFile("", "local-copy-tar-") if err != nil { return err } localTarClosed := false defer func() { if !localTarClosed { errors.LogError(localTarFile.Close()) } errors.LogError(os.Remove(localTarFile.Name())) }() glog.V(4).Infof("Downloading from host path %s to local tar file: %s", sourceDir, localTarFile.Name()) err = h.client.DownloadFromContainer(container, docker.DownloadFromContainerOptions{ Path: sourceDir, OutputStream: localTarFile, }) if err != nil { return err } if err = localTarFile.Close(); err != nil { return err } localTarClosed = true inputTar, err := os.Open(localTarFile.Name()) if err != nil { return err } defer func() { errors.LogError(inputTar.Close()) }() tarHelper := tarhelper.New() tarHelper.SetExclusionPattern(nil) glog.V(4).Infof("Extracting temporary tar %s to directory %s", inputTar.Name(), destDir) var tarLog io.Writer if glog.V(5) { tarLog = os.Stderr } return tarHelper.ExtractTarStreamWithLogging(destDir, inputTar, tarLog) }
// UploadFileToContainer uploads a file to a remote container.
func UploadFileToContainer(client *docker.Client, container, src, dest string) error {
	uploader := newContainerUploader(client, container, filepath.Dir(dest))

	nullWalkFunc := func(path string, info os.FileInfo, err error) error {
		return err
	}

	t := stitar.New()
	err := t.StreamFileAsTarWithCallback(src, filepath.Base(dest), uploader, nullWalkFunc, false)
	if err != nil {
		uploader.Close()
		return err
	}
	return uploader.Close()
}
// New creates a Layered builder.
func New(config *api.Config, fs util.FileSystem, scripts build.ScriptsHandler, overrides build.Overrides) (*Layered, error) {
	d, err := docker.New(config.DockerConfig, config.PullAuthentication)
	if err != nil {
		return nil, err
	}
	tarHandler := tar.New(fs)
	tarHandler.SetExclusionPattern(regexp.MustCompile(config.ExcludeRegExp))
	return &Layered{
		docker:  d,
		config:  config,
		fs:      fs,
		tar:     tarHandler,
		scripts: scripts,
	}, nil
}
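// SetExclusionPattern takes a *regexp.Regexp, and paths matching it are left
// out of the generated archive. As a small illustration of that kind of
// filtering (the matching rule shown is an assumption, not the helper's exact
// semantics), a walk callback might skip entries like this:
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"regexp"
)

func main() {
	// Example pattern comparable to config.ExcludeRegExp, e.g. ignore VCS metadata.
	exclude := regexp.MustCompile(`(^|/)\.git(/|$)`)
	err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if exclude.MatchString(path) {
			if info.IsDir() {
				return filepath.SkipDir // prune the whole excluded directory
			}
			return nil
		}
		fmt.Println("would archive:", path)
		return nil
	})
	if err != nil {
		panic(err)
	}
}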
// UploadFileToContainer uploads a file to a remote container.
func UploadFileToContainer(client *docker.Client, container, src, dest string) error {
	uploader, errch := newContainerUploader(client, container, filepath.Dir(dest))

	t := s2itar.New(s2iutil.NewFileSystem())
	tarWriter := s2itar.RenameAdapter{Writer: tar.NewWriter(uploader), Old: filepath.Base(src), New: filepath.Base(dest)}

	err := t.CreateTarStreamToTarWriter(src, true, tarWriter, nil)
	if err == nil {
		err = tarWriter.Close()
	}
	uploader.Close()
	if err != nil {
		return err
	}
	return <-errch
}
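// s2itar.RenameAdapter rewrites entry names so a file archived under its
// source basename appears in the stream under the destination basename. A
// hedged sketch of an equivalent wrapper over the standard library's
// tar.Writer is shown below; the type, field names, and the suffix-based
// matching are assumptions for illustration, not the s2i types.
package main

import (
	"archive/tar"
	"os"
	"strings"
)

// renameWriter rewrites the Old basename to New before delegating WriteHeader.
type renameWriter struct {
	*tar.Writer
	Old, New string
}

func (r renameWriter) WriteHeader(hdr *tar.Header) error {
	if strings.HasSuffix(hdr.Name, r.Old) {
		hdr.Name = strings.TrimSuffix(hdr.Name, r.Old) + r.New
	}
	return r.Writer.WriteHeader(hdr)
}

func main() {
	out, err := os.Create("upload.tar") // hypothetical output archive
	if err != nil {
		panic(err)
	}
	defer out.Close()

	w := renameWriter{Writer: tar.NewWriter(out), Old: "config.local", New: "config"}
	data := []byte("example contents\n")
	if err := w.WriteHeader(&tar.Header{Name: "config.local", Mode: 0644, Size: int64(len(data))}); err != nil {
		panic(err)
	}
	if _, err := w.Write(data); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
}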
func newTarStrategy(f *clientcmd.Factory, c *cobra.Command, o *RsyncOptions) (copyStrategy, error) {
	tarHelper := tar.New()
	tarHelper.SetExclusionPattern(nil)

	remoteExec, err := newRemoteExecutor(f, o)
	if err != nil {
		return nil, err
	}

	return &tarStrategy{
		Quiet:          o.Quiet,
		Delete:         o.Delete,
		Tar:            tarHelper,
		RemoteExecutor: remoteExec,
	}, nil
}
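// The tarStrategy built here presumably archives the local source and feeds
// the stream to a tar extraction on the far side through the RemoteExecutor.
// A hedged local stand-in for that idea, using os/exec in place of the remote
// executor and placeholder directories, is sketched below.
package main

import (
	"io"
	"os/exec"
)

func main() {
	// Produce the archive with one tar process and extract it with a second
	// tar process reading from stdin, mimicking a local-to-remote copy.
	create := exec.Command("tar", "-C", "./src", "-c", ".")
	extract := exec.Command("tar", "-C", "./dest", "-x")

	pr, pw := io.Pipe()
	create.Stdout = pw
	extract.Stdin = pr

	if err := extract.Start(); err != nil {
		panic(err)
	}
	if err := create.Run(); err != nil {
		panic(err)
	}
	pw.Close() // signal EOF to the extracting side
	if err := extract.Wait(); err != nil {
		panic(err)
	}
}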
// New returns a new instance of OnBuild builder
func New(config *api.Config) (*OnBuild, error) {
	dockerHandler, err := docker.New(config.DockerConfig, config.PullAuthentication)
	if err != nil {
		return nil, err
	}
	b := &OnBuild{
		docker: dockerHandler,
		git:    git.New(),
		fs:     util.NewFileSystem(),
		tar:    tar.New(),
	}
	// Use STI Prepare() and download the 'run' script optionally.
	s, err := sti.New(config)
	s.SetScripts([]string{}, []string{api.Assemble, api.Run})

	b.source = onBuildSourceHandler{
		&git.Clone{b.git, b.fs},
		s,
	}
	b.garbage = &build.DefaultCleaner{b.fs, b.docker}
	return b, nil
}
// UploadToContainerWithTarWriter uploads artifacts to the container.
// If the source is a directory, then all files and sub-folders are copied into
// the destination (which has to be a directory as well).
// If the source is a single file, then the file is copied into the destination
// (which has to be the full path to a file inside the container).
func (d *stiDocker) UploadToContainerWithTarWriter(fs util.FileSystem, src, dest, container string, makeTarWriter func(io.Writer) s2itar.Writer) error {
	path := filepath.Dir(dest)
	r, w := io.Pipe()
	go func() {
		tarWriter := makeTarWriter(w)
		tarWriter = s2itar.RenameAdapter{Writer: tarWriter, Old: filepath.Base(src), New: filepath.Base(dest)}

		err := s2itar.New(fs).CreateTarStreamToTarWriter(src, true, tarWriter, nil)
		if err == nil {
			err = tarWriter.Close()
		}

		w.CloseWithError(err)
	}()

	glog.V(3).Infof("Uploading %q to %q ...", src, path)
	ctx, cancel := getDefaultContext()
	defer cancel()
	err := d.client.CopyToContainer(ctx, container, path, r, dockertypes.CopyToContainerOptions{})
	if err != nil {
		glog.V(0).Infof("error: Uploading to container failed: %v", err)
	}
	return err
}
func extractSourceFromImage(dockerClient DockerClient, image, buildDir string, paths []api.ImageSourcePath) error {
	glog.V(4).Infof("Extracting image source from %s", image)

	// Pre-pull image if a secret is specified
	pullSecret := os.Getenv(dockercfg.PullSourceAuthType)
	if len(pullSecret) > 0 {
		dockerAuth, present := dockercfg.NewHelper().GetDockerAuth(image, dockercfg.PullSourceAuthType)
		if present {
			dockerClient.PullImage(docker.PullImageOptions{Repository: image}, dockerAuth)
		}
	}

	// Create container to copy from
	container, err := dockerClient.CreateContainer(docker.CreateContainerOptions{
		Config: &docker.Config{
			Image: image,
		},
	})
	if err != nil {
		return fmt.Errorf("error creating source image container: %v", err)
	}
	defer dockerClient.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID})

	tarHelper := tar.New()
	tarHelper.SetExclusionPattern(nil)

	for _, path := range paths {
		glog.V(4).Infof("Extracting path %s from container %s to %s", path.SourcePath, container.ID, path.DestinationDir)
		err := copyImageSource(dockerClient, container.ID, path.SourcePath, filepath.Join(buildDir, path.DestinationDir), tarHelper)
		if err != nil {
			return fmt.Errorf("error copying source path %s to %s: %v", path.SourcePath, path.DestinationDir, err)
		}
	}
	return nil
}
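// copyImageSource is not shown here; judging by its arguments it pulls a path
// out of the created container and lands it under the build directory. A
// hedged sketch of that step with go-dockerclient's DownloadFromContainer --
// listing the downloaded entries rather than extracting them, and using a
// placeholder container ID and path -- could look like this:
package main

import (
	"archive/tar"
	"fmt"
	"io"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	pr, pw := io.Pipe()
	go func() {
		// Close the write end with the download error (or nil) so the reader terminates.
		pw.CloseWithError(client.DownloadFromContainer("source-container", docker.DownloadFromContainerOptions{
			Path:         "/opt/app-root/src",
			OutputStream: pw,
		}))
	}()
	tr := tar.NewReader(pr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("downloaded entry:", hdr.Name)
	}
}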
func streamPathToBuild(repo git.Repository, in io.Reader, out io.Writer, client osclient.BuildConfigInterface, fromDir, fromFile, fromRepo string, options *buildapi.BinaryBuildRequestOptions) (*buildapi.Build, error) { count := 0 asDir, asFile, asRepo := len(fromDir) > 0, len(fromFile) > 0, len(fromRepo) > 0 if asDir { count++ } if asFile { count++ } if asRepo { count++ } if count > 1 { return nil, fmt.Errorf("only one of --from-file, --from-repo, or --from-dir may be specified") } if asRepo && !git.IsGitInstalled() { return nil, fmt.Errorf("cannot find git. Git is required to start a build from a repository. If git is not available, use --from-dir instead.") } var r io.Reader switch { case fromFile == "-": return nil, fmt.Errorf("--from-file=- is not supported") case fromDir == "-": br := bufio.NewReaderSize(in, 4096) r = br if !isArchive(br) { fmt.Fprintf(out, "WARNING: the provided file may not be an archive (tar, tar.gz, or zip), use --from-file=- instead\n") } fmt.Fprintf(out, "Uploading archive file from STDIN as binary input for the build ...\n") default: var fromPath string switch { case asDir: fromPath = fromDir case asFile: fromPath = fromFile case asRepo: fromPath = fromRepo } clean := filepath.Clean(fromPath) path, err := filepath.Abs(fromPath) if err != nil { return nil, err } stat, err := os.Stat(path) if err != nil { return nil, err } if stat.IsDir() { commit := "HEAD" if len(options.Commit) > 0 { commit = options.Commit } info, gitErr := gitRefInfo(repo, clean, commit) if gitErr == nil { options.Commit = info.GitSourceRevision.Commit options.Message = info.GitSourceRevision.Message options.AuthorName = info.GitSourceRevision.Author.Name options.AuthorEmail = info.GitSourceRevision.Author.Email options.CommitterName = info.GitSourceRevision.Committer.Name options.CommitterEmail = info.GitSourceRevision.Committer.Email } else { glog.V(6).Infof("Unable to read Git info from %q: %v", clean, gitErr) } if asRepo { fmt.Fprintf(out, "Uploading %q at commit %q as binary input for the build ...\n", clean, commit) if gitErr != nil { return nil, fmt.Errorf("the directory %q is not a valid Git repository: %v", clean, gitErr) } pr, pw := io.Pipe() go func() { if err := repo.Archive(clean, options.Commit, "tar.gz", pw); err != nil { pw.CloseWithError(fmt.Errorf("unable to create Git archive of %q for build: %v", clean, err)) } else { pw.CloseWithError(io.EOF) } }() r = pr } else { fmt.Fprintf(out, "Uploading directory %q as binary input for the build ...\n", clean) pr, pw := io.Pipe() go func() { w := gzip.NewWriter(pw) if err := tar.New().CreateTarStream(path, false, w); err != nil { pw.CloseWithError(err) } else { w.Close() pw.CloseWithError(io.EOF) } }() r = pr } } else { f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() r = f if asFile { options.AsFile = filepath.Base(path) fmt.Fprintf(out, "Uploading file %q as binary input for the build ...\n", clean) } else { br := bufio.NewReaderSize(f, 4096) r = br if !isArchive(br) { fmt.Fprintf(out, "WARNING: the provided file may not be an archive (tar, tar.gz, or zip), use --as-file\n") } fmt.Fprintf(out, "Uploading archive file %q as binary input for the build ...\n", clean) } } } return client.InstantiateBinary(options, r) }
// CopyMasterConfigToHost copies a local file to the Docker host func (h *HostHelper) CopyMasterConfigToHost(sourceFile, destDir string) error { localDir, err := makeTempCopy(sourceFile) if err != nil { return err } tarHelper := tarhelper.New() tarHelper.SetExclusionPattern(nil) var tarLog io.Writer if glog.V(5) { tarLog = os.Stderr } localTarFile, err := ioutil.TempFile("", "master-config") if err != nil { return err } localTarClosed := false defer func() { if !localTarClosed { errors.LogError(localTarFile.Close()) } }() glog.V(4).Infof("Creating temporary tar %s to upload to %s", localTarFile.Name(), destDir) err = tarHelper.CreateTarStreamWithLogging(localDir, false, localTarFile, tarLog) if err != nil { return err } err = localTarFile.Close() if err != nil { return err } localTarClosed = true localTarInputClosed := false localTarInput, err := os.Open(localTarFile.Name()) if err != nil { return err } defer func() { if !localTarInputClosed { localTarInput.Close() } }() bind := fmt.Sprintf("%s:/var/lib/origin/openshift.local.config:z", destDir) container, err := h.runner(). Image(h.image). Bind(bind).Create() _ = container if err != nil { return err } defer func() { errors.LogError(h.client.RemoveContainer(docker.RemoveContainerOptions{ID: container})) }() glog.V(4).Infof("Uploading tar file %s to remote dir: %s", localTarFile.Name(), destDir) err = h.client.UploadToContainer(container, docker.UploadToContainerOptions{ InputStream: localTarInput, Path: "/var/lib/origin/openshift.local.config/master", }) if err != nil { glog.V(4).Infof("An error occurred uploading the file: %v", err) } else { // If the upload succeeded the local input stream will be closed automatically localTarInputClosed = true glog.V(4).Infof("Successfully uploaded file.") } return err }
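// The master-config copy above stages a tar file locally and then streams it
// with UploadToContainer. Reduced to its core, and with a hypothetical
// pre-built archive and placeholder container name, that upload step is just:
package main

import (
	"os"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	f, err := os.Open("master-config.tar") // hypothetical pre-built archive
	if err != nil {
		panic(err)
	}
	defer f.Close()
	err = client.UploadToContainer("config-container", docker.UploadToContainerOptions{
		Path:        "/var/lib/origin/openshift.local.config/master",
		InputStream: f,
	})
	if err != nil {
		panic(err)
	}
}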
// TestDockerfilePath validates that we can use a Dockefile with a custom name, and in a sub-directory func TestDockerfilePath(t *testing.T) { tests := []struct { contextDir string dockerfilePath string dockerStrategy *api.DockerBuildStrategy }{ // default Dockerfile path { dockerfilePath: "Dockerfile", dockerStrategy: &api.DockerBuildStrategy{}, }, // custom Dockerfile path in the root context { dockerfilePath: "mydockerfile", dockerStrategy: &api.DockerBuildStrategy{ DockerfilePath: "mydockerfile", }, }, // custom Dockerfile path in a sub directory { dockerfilePath: "dockerfiles/mydockerfile", dockerStrategy: &api.DockerBuildStrategy{ DockerfilePath: "dockerfiles/mydockerfile", }, }, // custom Dockerfile path in a sub directory // with a contextDir { contextDir: "somedir", dockerfilePath: "dockerfiles/mydockerfile", dockerStrategy: &api.DockerBuildStrategy{ DockerfilePath: "dockerfiles/mydockerfile", }, }, } for _, test := range tests { buildDir, err := ioutil.TempDir("", "dockerfile-path") if err != nil { t.Errorf("failed to create tmpdir: %v", err) continue } absoluteDockerfilePath := filepath.Join(buildDir, test.contextDir, test.dockerfilePath) dockerfileContent := "FROM openshift/origin-base" if err = os.MkdirAll(filepath.Dir(absoluteDockerfilePath), os.FileMode(0750)); err != nil { t.Errorf("failed to create directory %s: %v", filepath.Dir(absoluteDockerfilePath), err) continue } if err = ioutil.WriteFile(absoluteDockerfilePath, []byte(dockerfileContent), os.FileMode(0644)); err != nil { t.Errorf("failed to write dockerfile to %s: %v", absoluteDockerfilePath, err) continue } build := &api.Build{ Spec: api.BuildSpec{ Source: api.BuildSource{ Git: &api.GitBuildSource{ URI: "http://github.com/openshift/origin.git", }, ContextDir: test.contextDir, }, Strategy: api.BuildStrategy{ DockerStrategy: test.dockerStrategy, }, Output: api.BuildOutput{ To: &kapi.ObjectReference{ Kind: "DockerImage", Name: "test/test-result:latest", }, }, }, } dockerClient := &FakeDocker{ buildImageFunc: func(opts docker.BuildImageOptions) error { if opts.Dockerfile != test.dockerfilePath { t.Errorf("Unexpected dockerfile path: %s (expected: %s)", opts.Dockerfile, test.dockerfilePath) } return nil }, } dockerBuilder := &DockerBuilder{ dockerClient: dockerClient, build: build, gitClient: git.NewRepository(), tar: tar.New(), } // this will validate that the Dockerfile is readable // and append some labels to the Dockerfile if err = dockerBuilder.addBuildParameters(buildDir); err != nil { t.Errorf("failed to add build parameters: %v", err) continue } // check that our Dockerfile has been modified dockerfileData, err := ioutil.ReadFile(absoluteDockerfilePath) if err != nil { t.Errorf("failed to read dockerfile %s: %v", absoluteDockerfilePath, err) continue } if !strings.Contains(string(dockerfileData), dockerfileContent) { t.Errorf("Updated Dockerfile content does not contains the original Dockerfile content.\n\nOriginal content:\n%s\n\nUpdated content:\n%s\n", dockerfileContent, string(dockerfileData)) continue } // check that the docker client is called with the right Dockerfile parameter if err = dockerBuilder.dockerBuild(buildDir); err != nil { t.Errorf("failed to build: %v", err) continue } } }
// New returns the instance of STI builder strategy for the given config. // If the layeredBuilder parameter is specified, then the builder provided will // be used for the case that the base Docker image does not have 'tar' or 'bash' // installed. func New(config *api.Config, overrides build.Overrides) (*STI, error) { docker, err := dockerpkg.New(config.DockerConfig, config.PullAuthentication) if err != nil { return nil, err } var incrementalDocker dockerpkg.Docker if config.Incremental { incrementalDocker, err = dockerpkg.New(config.DockerConfig, config.IncrementalAuthentication) if err != nil { return nil, err } } inst := scripts.NewInstaller(config.BuilderImage, config.ScriptsURL, config.ScriptDownloadProxyConfig, docker, config.PullAuthentication) tarHandler := tar.New() tarHandler.SetExclusionPattern(regexp.MustCompile(config.ExcludeRegExp)) builder := &STI{ installer: inst, config: config, docker: docker, incrementalDocker: incrementalDocker, git: git.New(), fs: util.NewFileSystem(), tar: tarHandler, callbackInvoker: util.NewCallbackInvoker(), requiredScripts: []string{api.Assemble, api.Run}, optionalScripts: []string{api.SaveArtifacts}, optionalRuntimeScripts: []string{api.AssembleRuntime}, externalScripts: map[string]bool{}, installedScripts: map[string]bool{}, scriptsURL: map[string]string{}, } if len(config.RuntimeImage) > 0 { builder.runtimeInstaller = scripts.NewInstaller(config.RuntimeImage, config.ScriptsURL, config.ScriptDownloadProxyConfig, docker, config.PullAuthentication) builder.runtimeDocker, err = dockerpkg.New(config.DockerConfig, config.RuntimeAuthentication) if err != nil { return builder, err } } // The sources are downloaded using the Git downloader. // TODO: Add more SCM in future. // TODO: explicit decision made to customize processing for usage specifically vs. // leveraging overrides; also, we ultimately want to simplify s2i usage a good bit, // which would lead to replacing this quick short circuit (so this change is tactical) builder.source = overrides.Downloader if builder.source == nil && !config.Usage { downloader, sourceURL, err := scm.DownloaderForSource(config.Source, config.ForceCopy) if err != nil { return nil, err } builder.source = downloader config.Source = sourceURL } builder.garbage = build.NewDefaultCleaner(builder.fs, builder.docker) builder.layered, err = layered.New(config, builder, overrides) // Set interfaces builder.preparer = builder // later on, if we support say .gitignore func in addition to .dockerignore func, setting // ignorer will be based on config setting builder.ignorer = &ignore.DockerIgnorer{} builder.artifacts = builder builder.scripts = builder builder.postExecutor = builder builder.initPostExecutorSteps() return builder, err }
func extractSourceFromImage(dockerClient DockerClient, image, buildDir string, imageSecretIndex int, paths []api.ImageSourcePath, forcePull bool) error { glog.V(4).Infof("Extracting image source from %s", image) dockerAuth := docker.AuthConfiguration{} if imageSecretIndex != -1 { pullSecret := os.Getenv(fmt.Sprintf("%s%d", dockercfg.PullSourceAuthType, imageSecretIndex)) if len(pullSecret) > 0 { authPresent := false dockerAuth, authPresent = dockercfg.NewHelper().GetDockerAuth(image, fmt.Sprintf("%s%d", dockercfg.PullSourceAuthType, imageSecretIndex)) if authPresent { glog.V(5).Infof("Registry server Address: %s", dockerAuth.ServerAddress) glog.V(5).Infof("Registry server User Name: %s", dockerAuth.Username) glog.V(5).Infof("Registry server Email: %s", dockerAuth.Email) passwordPresent := "<<empty>>" if len(dockerAuth.Password) > 0 { passwordPresent = "<<non-empty>>" } glog.V(5).Infof("Registry server Password: %s", passwordPresent) } } } exists := true if !forcePull { _, err := dockerClient.InspectImage(image) if err != nil && err == docker.ErrNoSuchImage { exists = false } else if err != nil { return err } } if !exists || forcePull { glog.V(0).Infof("Pulling image %q ...", image) if err := dockerClient.PullImage(docker.PullImageOptions{Repository: image}, dockerAuth); err != nil { return fmt.Errorf("error pulling image %v: %v", image, err) } } containerConfig := &docker.Config{Image: image} if inspect, err := dockerClient.InspectImage(image); err != nil { return err } else { // In case the Docker image does not specify the entrypoint if len(inspect.Config.Entrypoint) == 0 && len(inspect.Config.Cmd) == 0 { containerConfig.Entrypoint = []string{"/fake-entrypoint"} } } // Create container to copy from container, err := dockerClient.CreateContainer(docker.CreateContainerOptions{Config: containerConfig}) if err != nil { return fmt.Errorf("error creating source image container: %v", err) } defer dockerClient.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID}) tarHelper := tar.New() tarHelper.SetExclusionPattern(nil) for _, path := range paths { glog.V(4).Infof("Extracting path %s from container %s to %s", path.SourcePath, container.ID, path.DestinationDir) err := copyImageSource(dockerClient, container.ID, path.SourcePath, filepath.Join(buildDir, path.DestinationDir), tarHelper) if err != nil { return fmt.Errorf("error copying source path %s to %s: %v", path.SourcePath, path.DestinationDir, err) } } return nil }
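// The pull-if-absent logic above (InspectImage, ErrNoSuchImage, PullImage) is
// a common go-dockerclient pattern. Isolated into a small helper with a
// placeholder image name, it reads as below; authentication is left empty
// here, whereas the build code threads the resolved registry credentials
// through.
package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

// ensureImage pulls image only when it is not already present locally.
func ensureImage(client *docker.Client, image string, force bool) error {
	if !force {
		if _, err := client.InspectImage(image); err == nil {
			return nil // already present
		} else if err != docker.ErrNoSuchImage {
			return err
		}
	}
	fmt.Printf("Pulling image %q ...\n", image)
	return client.PullImage(docker.PullImageOptions{Repository: image}, docker.AuthConfiguration{})
}

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		panic(err)
	}
	if err := ensureImage(client, "docker.io/library/busybox:latest", false); err != nil {
		panic(err)
	}
}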
func streamPathToBuild(repo git.Repository, in io.Reader, out io.Writer, client osclient.BuildConfigInterface, fromDir, fromFile, fromRepo string, options *buildapi.BinaryBuildRequestOptions) (*buildapi.Build, error) {
	count := 0
	asDir, asFile, asRepo := len(fromDir) > 0, len(fromFile) > 0, len(fromRepo) > 0
	if asDir {
		count++
	}
	if asFile {
		count++
	}
	if asRepo {
		count++
	}
	if count > 1 {
		return nil, fmt.Errorf("only one of --from-file, --from-repo, or --from-dir may be specified")
	}
	if asRepo && !git.IsGitInstalled() {
		return nil, fmt.Errorf("cannot find git. Git is required to start a build from a repository. If git is not available, use --from-dir instead.")
	}

	var r io.Reader
	switch {
	case fromFile == "-":
		return nil, fmt.Errorf("--from-file=- is not supported")

	case fromDir == "-":
		br := bufio.NewReaderSize(in, 4096)
		r = br
		if !isArchive(br) {
			fmt.Fprintf(out, "WARNING: the provided file may not be an archive (tar, tar.gz, or zip), use --from-file=- instead\n")
		}
		fmt.Fprintf(out, "Uploading archive file from STDIN as binary input for the build ...\n")

	default:
		var fromPath string
		switch {
		case asDir:
			fromPath = fromDir
		case asFile:
			fromPath = fromFile
		case asRepo:
			fromPath = fromRepo
		}

		clean := filepath.Clean(fromPath)
		path, err := filepath.Abs(fromPath)
		if err != nil {
			return nil, err
		}

		stat, err := os.Stat(path)
		if err != nil {
			return nil, err
		}
		if stat.IsDir() {
			commit := "HEAD"
			if len(options.Commit) > 0 {
				commit = options.Commit
			}

			info, gitErr := gitRefInfo(repo, path, commit)
			if gitErr == nil {
				options.Commit = info.GitSourceRevision.Commit
				options.Message = info.GitSourceRevision.Message
				options.AuthorName = info.GitSourceRevision.Author.Name
				options.AuthorEmail = info.GitSourceRevision.Author.Email
				options.CommitterName = info.GitSourceRevision.Committer.Name
				options.CommitterEmail = info.GitSourceRevision.Committer.Email
			} else {
				glog.V(6).Infof("Unable to read Git info from %q: %v", clean, gitErr)
			}

			// NOTE: It's important that this stays false unless we change the
			// path to something else, otherwise we will delete whatever path the
			// user provided.
			var usedTempDir bool = false
			var tempDirectory string = ""

			if asRepo {
				var contextDir string = ""
				fmt.Fprintf(out, "Uploading %q at commit %q as binary input for the build ...\n", clean, commit)
				if gitErr != nil {
					return nil, fmt.Errorf("the directory %q is not a valid Git repository: %v", clean, gitErr)
				}

				// If the user doesn't give us the root directory of the Git repo,
				// we still want the command to work. However, as this may be
				// unintended, we warn them.
				if gitRootDir, err := repo.GetRootDir(path); filepath.Clean(gitRootDir) != filepath.Clean(path) && err == nil {
					fmt.Fprintf(out, "WARNING: Using root dir %s for Git repository\n", gitRootDir)
					contextDir, _ = filepath.Rel(gitRootDir, path)
					path = gitRootDir
				}

				// Create a temp directory to move the repo contents to.
				// Assign (do not redeclare) tempDirectory so the cleanup goroutine
				// below sees the real path.
				tempDirectory, err = ioutil.TempDir(os.TempDir(), "oc_cloning_"+options.Commit)
				if err != nil {
					return nil, err
				}

				// We only want to grab the contents of the specified commit, with
				// submodules included
				cloneOptions := []string{"--recursive"}
				if verbose := glog.V(3); !verbose {
					cloneOptions = append(cloneOptions, "--quiet")
				}

				// Clone the repository to a temp directory for future tar-ing
				if err := repo.CloneWithOptions(tempDirectory, path, cloneOptions...); err != nil {
					return nil, err
				}
				if err := repo.Checkout(tempDirectory, commit); err != nil {
					return nil, err
				}

				// We'll continue to use tar on the temp directory
				path = filepath.Join(tempDirectory, contextDir)
				usedTempDir = true

			} else {
				fmt.Fprintf(out, "Uploading directory %q as binary input for the build ...\n", clean)
			}

			pr, pw := io.Pipe()
			go func() {
				w := gzip.NewWriter(pw)
				if err := tar.New().CreateTarStream(path, false, w); err != nil {
					pw.CloseWithError(err)
				} else {
					w.Close()
					pw.CloseWithError(io.EOF)
				}
				if usedTempDir {
					os.RemoveAll(tempDirectory)
				}
			}()
			r = pr

		} else {
			f, err := os.Open(path)
			if err != nil {
				return nil, err
			}
			defer f.Close()

			r = f
			if asFile {
				options.AsFile = filepath.Base(path)
				fmt.Fprintf(out, "Uploading file %q as binary input for the build ...\n", clean)
			} else {
				br := bufio.NewReaderSize(f, 4096)
				r = br
				if !isArchive(br) {
					fmt.Fprintf(out, "WARNING: the provided file may not be an archive (tar, tar.gz, or zip), use --as-file\n")
				}
				fmt.Fprintf(out, "Uploading archive file %q as binary input for the build ...\n", clean)
			}
		}
	}
	return client.InstantiateBinary(options, r)
}
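// The repository path above is snapshotted by cloning into a throwaway
// directory and checking out the requested commit before it is archived. A
// hedged stand-alone sketch of that sequence, shelling out to the git CLI
// (which the repo helper presumably does as well, since git must be
// installed), with placeholder repo path and commit:
package main

import (
	"io/ioutil"
	"os"
	"os/exec"
)

func main() {
	tempDirectory, err := ioutil.TempDir(os.TempDir(), "oc_cloning_")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tempDirectory) // always clean up the snapshot

	// Clone with submodules, then pin the working tree to the desired commit.
	clone := exec.Command("git", "clone", "--recursive", "--quiet", "./myrepo", tempDirectory)
	clone.Stdout, clone.Stderr = os.Stdout, os.Stderr
	if err := clone.Run(); err != nil {
		panic(err)
	}
	checkout := exec.Command("git", "checkout", "HEAD")
	checkout.Dir = tempDirectory
	if err := checkout.Run(); err != nil {
		panic(err)
	}
	// tempDirectory now holds the exact commit contents, ready to be tarred.
}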