// Actually invoke the docker API to run the resulting s2i image in a container, // where the redirecting of the container's stdout and stderr will go to glog. func (b *DockerRunner) Run(config *api.Config) error { glog.V(4).Infof("Attempting to run image %s \n", config.Tag) errOutput := "" outReader, outWriter := io.Pipe() errReader, errWriter := io.Pipe() defer errReader.Close() defer errWriter.Close() defer outReader.Close() defer outWriter.Close() opts := docker.RunContainerOptions{ Image: config.Tag, Stdout: outWriter, Stderr: errWriter, TargetImage: true, CGroupLimits: config.CGroupLimits, } //NOTE, we've seen some Golang level deadlock issues with the streaming of cmd output to // glog, but part of the deadlock seems to have occurred when stdout was "silent" // and produced no data, such as when we would do a git clone with the --quiet option. // We have not seen the hang when the Cmd produces output to stdout. go docker.StreamContainerIO(errReader, nil, glog.Error) go docker.StreamContainerIO(outReader, nil, glog.Info) rerr := b.ContainerClient.RunContainer(opts) if e, ok := rerr.(errors.ContainerError); ok { return errors.NewContainerError(config.Tag, e.ErrorCode, errOutput) } return nil }
// Save extracts and restores the build artifacts from the previous build to a
// current build. The artifacts are produced by running the save-artifacts
// script in the previous image and untarring its stdout into
// <WorkingDir>/upload/artifacts for the new build to consume.
func (builder *STI) Save(config *api.Config) (err error) {
	artifactTmpDir := filepath.Join(config.WorkingDir, "upload", "artifacts")
	if err = builder.fs.Mkdir(artifactTmpDir); err != nil {
		return err
	}

	// Prefer the explicit incremental source tag; fall back to the output tag.
	image := firstNonEmpty(config.IncrementalFromTag, config.Tag)

	outReader, outWriter := io.Pipe()
	defer outReader.Close()
	defer outWriter.Close()
	errReader, errWriter := io.Pipe()
	defer errReader.Close()
	defer errWriter.Close()
	glog.V(1).Infof("Saving build artifacts from image %s to path %s", image, artifactTmpDir)

	// OnStart hook: untar the save-artifacts output (container stdout) into
	// the temporary artifacts directory while the container runs.
	extractFunc := func(string) error {
		return builder.tar.ExtractTarStream(artifactTmpDir, outReader)
	}

	// Run the script as the requested assemble user, defaulting to the
	// image's configured user when none was requested.
	user := config.AssembleUser
	if len(user) == 0 {
		user, err = builder.docker.GetImageUser(image)
		if err != nil {
			return err
		}
		glog.V(3).Infof("The assemble user is not set, defaulting to %q user", user)
	} else {
		glog.V(3).Infof("Using assemble user %q to extract artifacts", user)
	}

	opts := dockerpkg.RunContainerOptions{
		Image:           image,
		User:            user,
		ExternalScripts: builder.externalScripts[api.SaveArtifacts],
		ScriptsURL:      config.ScriptsURL,
		Destination:     config.Destination,
		PullImage:       false,
		Command:         api.SaveArtifacts,
		Stdout:          outWriter,
		Stderr:          errWriter,
		OnStart:         extractFunc,
		NetworkMode:     string(config.DockerNetworkMode),
		CGroupLimits:    config.CGroupLimits,
		CapDrop:         config.DropCapabilities,
	}

	// Relay the container's stderr to the info log while it runs; no capture
	// buffer is used here, the wrapped ContainerError carries e.Output instead.
	go dockerpkg.StreamContainerIO(errReader, nil, func(a ...interface{}) { glog.Info(a...) })

	err = builder.docker.RunContainer(opts)
	if e, ok := err.(errors.ContainerError); ok {
		// Close errReader ahead of the deferred close so StreamContainerIO
		// exits promptly, releasing the writer side of the pipe before we
		// wrap and return the error.
		errReader.Close()
		return errors.NewSaveArtifactsError(image, e.Output, err)
	}
	return err
}
// Save extracts and restores the build artifacts from the previous build to a // current build. func (b *STI) Save(config *api.Config) (err error) { artifactTmpDir := filepath.Join(config.WorkingDir, "upload", "artifacts") if err = b.fs.Mkdir(artifactTmpDir); err != nil { return err } image := config.IncrementalFromTag if len(image) == 0 { image = config.Tag } outReader, outWriter := io.Pipe() defer outReader.Close() defer outWriter.Close() errReader, errWriter := io.Pipe() defer errReader.Close() defer errWriter.Close() glog.V(1).Infof("Saving build artifacts from image %s to path %s", image, artifactTmpDir) extractFunc := func(string) error { return b.tar.ExtractTarStream(artifactTmpDir, outReader) } user := config.AssembleUser if len(user) == 0 { user, err = b.docker.GetImageUser(image) if err != nil { return err } glog.V(3).Infof("The assemble user is not set, defaulting to %q user", user) } else { glog.V(3).Infof("Using assemble user %q to extract artifacts", user) } opts := dockerpkg.RunContainerOptions{ Image: image, User: user, ExternalScripts: b.externalScripts[api.SaveArtifacts], ScriptsURL: config.ScriptsURL, Destination: config.Destination, PullImage: false, Command: api.SaveArtifacts, Stdout: outWriter, Stderr: errWriter, OnStart: extractFunc, NetworkMode: string(config.DockerNetworkMode), CGroupLimits: config.CGroupLimits, CapDrop: config.DropCapabilities, } go dockerpkg.StreamContainerIO(errReader, nil, glog.Error) err = b.docker.RunContainer(opts) if e, ok := err.(errors.ContainerError); ok { return errors.NewSaveArtifactsError(image, e.Output, err) } return err }
// Run invokes the Docker API to run the image defined in config as a new // container. The container's stdout and stderr will be logged with glog. func (b *DockerRunner) Run(config *api.Config) error { glog.V(4).Infof("Attempting to run image %s \n", config.Tag) outReader, outWriter := io.Pipe() defer outReader.Close() defer outWriter.Close() errReader, errWriter := io.Pipe() defer errReader.Close() defer errWriter.Close() opts := docker.RunContainerOptions{ Image: config.Tag, Entrypoint: sti.DefaultEntrypoint, Stdout: outWriter, Stderr: errWriter, TargetImage: true, CGroupLimits: config.CGroupLimits, CapDrop: config.DropCapabilities, } // NOTE, we've seen some Golang level deadlock issues with the streaming of cmd output to // glog, but part of the deadlock seems to have occurred when stdout was "silent" // and produced no data, such as when we would do a git clone with the --quiet option. // We have not seen the hang when the Cmd produces output to stdout. go docker.StreamContainerIO(errReader, nil, glog.Error) go docker.StreamContainerIO(outReader, nil, glog.Info) err := b.ContainerClient.RunContainer(opts) // If we get a ContainerError, the original message reports the // container name. The container is temporary and its name is // meaningless, therefore we make the error message more helpful by // replacing the container name with the image tag. if e, ok := err.(errors.ContainerError); ok { return errors.NewContainerError(config.Tag, e.ErrorCode, e.Output) } return err }
// Save extracts and restores the build artifacts from the previous build to a // current build. func (b *STI) Save(config *api.Config) (err error) { artifactTmpDir := filepath.Join(config.WorkingDir, "upload", "artifacts") if err = b.fs.Mkdir(artifactTmpDir); err != nil { return err } image := config.Tag outReader, outWriter := io.Pipe() errReader, errWriter := io.Pipe() defer errReader.Close() defer errWriter.Close() glog.V(1).Infof("Saving build artifacts from image %s to path %s", image, artifactTmpDir) extractFunc := func() error { defer outReader.Close() return b.tar.ExtractTarStream(artifactTmpDir, outReader) } opts := dockerpkg.RunContainerOptions{ Image: image, ExternalScripts: b.externalScripts[api.SaveArtifacts], ScriptsURL: config.ScriptsURL, Destination: config.Destination, PullImage: false, Command: api.SaveArtifacts, Stdout: outWriter, Stderr: errWriter, OnStart: extractFunc, NetworkMode: string(config.DockerNetworkMode), } go dockerpkg.StreamContainerIO(errReader, nil, glog.Error) err = b.docker.RunContainer(opts) if e, ok := err.(errors.ContainerError); ok { return errors.NewSaveArtifactsError(image, e.Output, err) } return err }
// Execute runs the specified STI script in the builder image. func (b *STI) Execute(command string, config *api.Config) error { glog.V(2).Infof("Using image name %s", config.BuilderImage) env, err := scripts.GetEnvironment(config) if err != nil { glog.V(1).Infof("No .sti/environment provided (%v)", err) } buildEnv := append(scripts.ConvertEnvironment(env), b.generateConfigEnv()...) errOutput := "" outReader, outWriter := io.Pipe() errReader, errWriter := io.Pipe() defer outReader.Close() defer outWriter.Close() defer errReader.Close() defer errWriter.Close() externalScripts := b.externalScripts[command] // if LayeredBuild is called then all the scripts will be placed inside the image if config.LayeredBuild { externalScripts = false } opts := dockerpkg.RunContainerOptions{ Image: config.BuilderImage, Stdout: outWriter, Stderr: errWriter, PullImage: config.ForcePull, ExternalScripts: externalScripts, ScriptsURL: config.ScriptsURL, Destination: config.Destination, Command: command, Env: buildEnv, PostExec: b.postExecutor, } if !config.LayeredBuild { wg := sync.WaitGroup{} wg.Add(1) uploadDir := filepath.Join(config.WorkingDir, "upload") // TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit r, w := io.Pipe() go func() { var err error defer func() { w.CloseWithError(err) if r := recover(); r != nil { glog.Errorf("recovered panic: %#v", r) } wg.Done() }() err = b.tar.CreateTarStream(uploadDir, false, w) }() opts.Stdin = r defer wg.Wait() } go func(reader io.Reader) { scanner := bufio.NewReader(reader) for { text, err := scanner.ReadString('\n') if err != nil { // we're ignoring ErrClosedPipe, as this is information // the docker container ended streaming logs if glog.V(2) && err != io.ErrClosedPipe { glog.Errorf("Error reading docker stdout, %v", err) } break } if glog.V(2) || config.Quiet != true || command == api.Usage { glog.Info(text) } } }(outReader) go dockerpkg.StreamContainerIO(errReader, &errOutput, glog.Error) err = 
b.docker.RunContainer(opts) if e, ok := err.(errors.ContainerError); ok { return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput) } return err }
// Execute runs the specified STI script in the builder image. func (b *STI) Execute(command string, user string, config *api.Config) error { glog.V(2).Infof("Using image name %s", config.BuilderImage) env, err := scripts.GetEnvironment(config) if err != nil { glog.V(1).Infof("No user environment provided (%v)", err) } buildEnv := append(scripts.ConvertEnvironment(env), b.generateConfigEnv()...) errOutput := "" outReader, outWriter := io.Pipe() errReader, errWriter := io.Pipe() defer outReader.Close() defer outWriter.Close() defer errReader.Close() defer errWriter.Close() externalScripts := b.externalScripts[command] // if LayeredBuild is called then all the scripts will be placed inside the image if config.LayeredBuild { externalScripts = false } opts := dockerpkg.RunContainerOptions{ Image: config.BuilderImage, Stdout: outWriter, Stderr: errWriter, // The PullImage is false because the PullImage function should be called // before we run the container PullImage: false, ExternalScripts: externalScripts, ScriptsURL: config.ScriptsURL, Destination: config.Destination, Command: command, Env: buildEnv, User: user, PostExec: b.postExecutor, NetworkMode: string(config.DockerNetworkMode), CGroupLimits: config.CGroupLimits, CapDrop: config.DropCapabilities, } // If there are injections specified, override the original assemble script // and wait till all injections are uploaded into the container that runs the // assemble script. 
injectionComplete := make(chan struct{}) var injectionError error if len(config.Injections) > 0 && command == api.Assemble { workdir, err := b.docker.GetImageWorkdir(config.BuilderImage) if err != nil { return err } util.FixInjectionsWithRelativePath(workdir, &config.Injections) injectedFiles, err := util.ExpandInjectedFiles(config.Injections) if err != nil { return err } rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections") if err != nil { return err } defer os.Remove(rmScript) opts.CommandOverrides = func(cmd string) string { return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result", "/tmp/rm-injections", cmd) } originalOnStart := opts.OnStart opts.OnStart = func(containerID string) error { defer close(injectionComplete) if err != nil { injectionError = err return err } glog.V(2).Info("starting the injections uploading ...") for _, s := range config.Injections { if err := b.docker.UploadToContainer(s.SourcePath, s.DestinationDir, containerID); err != nil { injectionError = util.HandleInjectionError(s, err) return err } } if err := b.docker.UploadToContainer(rmScript, "/tmp/rm-injections", containerID); err != nil { injectionError = util.HandleInjectionError(api.InjectPath{SourcePath: rmScript, DestinationDir: "/tmp/rm-injections"}, err) return err } if originalOnStart != nil { return originalOnStart(containerID) } return nil } } else { close(injectionComplete) } wg := sync.WaitGroup{} if !config.LayeredBuild { wg.Add(1) uploadDir := filepath.Join(config.WorkingDir, "upload") // TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit r, w := io.Pipe() go func() { // Wait for the injections to complete and check the error. Do not start // streaming the sources when the injection failed. 
<-injectionComplete if injectionError != nil { wg.Done() return } glog.V(2).Info("starting the source uploading ...") var err error defer func() { w.CloseWithError(err) if r := recover(); r != nil { glog.Errorf("recovered panic: %#v", r) } wg.Done() }() err = b.tar.CreateTarStream(uploadDir, false, w) }() opts.Stdin = r defer wg.Wait() } go func(reader io.Reader) { scanner := bufio.NewReader(reader) for { text, err := scanner.ReadString('\n') if err != nil { // we're ignoring ErrClosedPipe, as this is information // the docker container ended streaming logs if glog.V(2) && err != io.ErrClosedPipe { glog.Errorf("Error reading docker stdout, %v", err) } break } // Nothing is printed when the quiet option is set if config.Quiet { continue } // The log level > 3 forces to use glog instead of printing to stdout if glog.V(3) { glog.Info(text) continue } fmt.Fprintf(os.Stdout, "%s\n", strings.TrimSpace(text)) } }(outReader) go dockerpkg.StreamContainerIO(errReader, &errOutput, glog.Error) err = b.docker.RunContainer(opts) if util.IsTimeoutError(err) { // Cancel waiting for source input if the container timeouts wg.Done() } if e, ok := err.(errors.ContainerError); ok { return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput) } return err }
// Build handles the `docker build` equivalent execution, returning the // success/failure details. func (builder *Layered) Build(config *api.Config) (*api.Result, error) { buildResult := &api.Result{} if config.HasOnBuild && config.BlockOnBuild { buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonOnBuildForbidden, utilstatus.ReasonMessageOnBuildForbidden, ) return buildResult, errors.New("builder image uses ONBUILD instructions but ONBUILD is not allowed") } if config.BuilderImage == "" { buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed, ) return buildResult, errors.New("builder image name cannot be empty") } if err := builder.CreateDockerfile(config); err != nil { buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonDockerfileCreateFailed, utilstatus.ReasonMessageDockerfileCreateFailed, ) return buildResult, err } glog.V(2).Info("Creating application source code image") tarStream := builder.tar.CreateTarStreamReader(filepath.Join(config.WorkingDir, "upload"), false) defer tarStream.Close() newBuilderImage := fmt.Sprintf("s2i-layered-temp-image-%d", time.Now().UnixNano()) outReader, outWriter := io.Pipe() opts := docker.BuildImageOptions{ Name: newBuilderImage, Stdin: tarStream, Stdout: outWriter, CGroupLimits: config.CGroupLimits, } docker.StreamContainerIO(outReader, nil, func(s string) { glog.V(2).Info(s) }) glog.V(2).Infof("Building new image %s with scripts and sources already inside", newBuilderImage) if err := builder.docker.BuildImage(opts); err != nil { buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonDockerImageBuildFailed, utilstatus.ReasonMessageDockerImageBuildFailed, ) return buildResult, err } // upon successful build we need to modify current config builder.config.LayeredBuild = true // new image name builder.config.BuilderImage = newBuilderImage // 
see CreateDockerfile, conditional copy, location of scripts scriptsIncluded := checkValidDirWithContents(path.Join(config.WorkingDir, api.UploadScripts)) glog.V(2).Infof("Scripts dir has contents %v", scriptsIncluded) if scriptsIncluded { builder.config.ScriptsURL = "image://" + path.Join(getDestination(config), "scripts") } else { var err error builder.config.ScriptsURL, err = builder.docker.GetScriptsURL(newBuilderImage) if err != nil { buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed, ) return buildResult, err } } glog.V(2).Infof("Building %s using sti-enabled image", builder.config.Tag) if err := builder.scripts.Execute(api.Assemble, config.AssembleUser, builder.config); err != nil { buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonAssembleFailed, utilstatus.ReasonMessageAssembleFailed, ) switch e := err.(type) { case s2ierr.ContainerError: return buildResult, s2ierr.NewAssembleError(builder.config.Tag, e.Output, e) default: return buildResult, err } } buildResult.Success = true return buildResult, nil }
// Execute runs the specified STI script in the builder image. func (b *STI) Execute(command string, config *api.Config) error { glog.V(2).Infof("Using image name %s", config.BuilderImage) env, err := scripts.GetEnvironment(config) if err != nil { glog.V(1).Infof("No .sti/environment provided (%v)", err) } buildEnv := append(scripts.ConvertEnvironment(env), b.generateConfigEnv()...) uploadDir := filepath.Join(config.WorkingDir, "upload") tarFileName, err := b.tar.CreateTarFile(config.WorkingDir, uploadDir) if err != nil { return err } tarFile, err := b.fs.Open(tarFileName) if err != nil { return err } defer tarFile.Close() errOutput := "" outReader, outWriter := io.Pipe() errReader, errWriter := io.Pipe() defer outReader.Close() defer outWriter.Close() defer errReader.Close() defer errWriter.Close() externalScripts := b.externalScripts[command] // if LayeredBuild is called then all the scripts will be placed inside the image if config.LayeredBuild { externalScripts = false } opts := dockerpkg.RunContainerOptions{ Image: config.BuilderImage, Stdout: outWriter, Stderr: errWriter, PullImage: config.ForcePull, ExternalScripts: externalScripts, ScriptsURL: config.ScriptsURL, Destination: config.Destination, Command: command, Env: buildEnv, PostExec: b.postExecutor, } if !config.LayeredBuild { opts.Stdin = tarFile } go func(reader io.Reader) { scanner := bufio.NewReader(reader) for { text, err := scanner.ReadString('\n') if err != nil { // we're ignoring ErrClosedPipe, as this is information // the docker container ended streaming logs if glog.V(2) && err != io.ErrClosedPipe { glog.Errorf("Error reading docker stdout, %v", err) } break } if glog.V(2) || config.Quiet != true || command == api.Usage { glog.Info(text) } } }(outReader) go dockerpkg.StreamContainerIO(errReader, &errOutput, glog.Error) err = b.docker.RunContainer(opts) if e, ok := err.(errors.ContainerError); ok { return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput) } return err }
// Execute runs the specified STI script in the builder image as the given
// user. For assemble with injections configured, the command is wrapped so it
// waits for the injected files to arrive and removes them afterwards.
// Sources are streamed into the container over stdin unless this is a
// layered build. Stdout is relayed to the log; stderr is captured so a
// ContainerError can be wrapped with the script's error output.
func (builder *STI) Execute(command string, user string, config *api.Config) error {
	glog.V(2).Infof("Using image name %s", config.BuilderImage)

	// we can't invoke this method before (for example in New() method)
	// because of later initialization of config.WorkingDir
	builder.env = createBuildEnvironment(config)

	errOutput := ""
	outReader, outWriter := io.Pipe()
	errReader, errWriter := io.Pipe()
	defer outReader.Close()
	defer outWriter.Close()
	defer errReader.Close()
	defer errWriter.Close()
	externalScripts := builder.externalScripts[command]
	// if LayeredBuild is called then all the scripts will be placed inside the image
	if config.LayeredBuild {
		externalScripts = false
	}

	opts := dockerpkg.RunContainerOptions{
		Image:  config.BuilderImage,
		Stdout: outWriter,
		Stderr: errWriter,
		// The PullImage is false because the PullImage function should be called
		// before we run the container
		PullImage:       false,
		ExternalScripts: externalScripts,
		ScriptsURL:      config.ScriptsURL,
		Destination:     config.Destination,
		Command:         command,
		Env:             builder.env,
		User:            user,
		PostExec:        builder.postExecutor,
		NetworkMode:     string(config.DockerNetworkMode),
		CGroupLimits:    config.CGroupLimits,
		CapDrop:         config.DropCapabilities,
		Binds:           config.BuildVolumes.AsBinds(),
	}

	// If there are injections specified, override the original assemble script
	// and wait till all injections are uploaded into the container that runs the
	// assemble script.
	injectionComplete := make(chan struct{})
	var injectionError error
	if len(config.Injections) > 0 && command == api.Assemble {
		workdir, err := builder.docker.GetImageWorkdir(config.BuilderImage)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return err
		}
		config.Injections = util.FixInjectionsWithRelativePath(workdir, config.Injections)
		injectedFiles, err := util.ExpandInjectedFiles(config.Injections)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonInstallScriptsFailed, utilstatus.ReasonMessageInstallScriptsFailed)
			return err
		}
		rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections")
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return err
		}
		defer os.Remove(rmScript)
		// Wrap the command: wait for the upload marker file, run the
		// original command, then source the removal script while preserving
		// the original command's exit code.
		opts.CommandOverrides = func(cmd string) string {
			return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result", "/tmp/rm-injections", cmd)
		}
		originalOnStart := opts.OnStart
		opts.OnStart = func(containerID string) error {
			// Always signal completion so the source-upload goroutine below
			// cannot block forever on injectionComplete.
			defer close(injectionComplete)
			if err != nil {
				injectionError = err
				return err
			}
			glog.V(2).Info("starting the injections uploading ...")
			for _, s := range config.Injections {
				if err := builder.docker.UploadToContainer(s.Source, s.Destination, containerID); err != nil {
					injectionError = util.HandleInjectionError(s, err)
					return err
				}
			}
			if err := builder.docker.UploadToContainer(rmScript, "/tmp/rm-injections", containerID); err != nil {
				injectionError = util.HandleInjectionError(api.VolumeSpec{Source: rmScript, Destination: "/tmp/rm-injections"}, err)
				return err
			}
			if originalOnStart != nil {
				return originalOnStart(containerID)
			}
			return nil
		}
	} else {
		close(injectionComplete)
	}

	wg := sync.WaitGroup{}
	if !config.LayeredBuild {
		wg.Add(1)
		uploadDir := filepath.Join(config.WorkingDir, "upload")
		// TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
		r, w := io.Pipe()
		go func() {
			// reminder, multiple defers follow a stack, LIFO order of processing
			defer wg.Done()
			// Wait for the injections to complete and check the error. Do not start
			// streaming the sources when the injection failed.
			<-injectionComplete
			if injectionError != nil {
				return
			}
			glog.V(2).Info("starting the source uploading ...")
			var err error
			defer func() {
				// Propagate any tar failure to the pipe reader so the
				// container's stdin sees the error instead of a clean EOF.
				w.CloseWithError(err)
				if r := recover(); r != nil {
					glog.Errorf("recovered panic: %#v", r)
				}
			}()
			err = builder.tar.CreateTarStream(uploadDir, false, w)
		}()

		opts.Stdin = r
	}

	go func(reader io.Reader) {
		scanner := bufio.NewReader(reader)
		// Precede build output with newline
		glog.Info()
		for {
			text, err := scanner.ReadString('\n')
			if err != nil {
				// we're ignoring ErrClosedPipe, as this is information
				// the docker container ended streaming logs
				if glog.Is(2) && err != io.ErrClosedPipe && err != io.EOF {
					glog.Errorf("Error reading docker stdout, %#v", err)
				}
				break
			}
			// Nothing is printed when the quiet option is set
			if config.Quiet {
				continue
			}
			glog.Info(strings.TrimSpace(text))
		}
		// Terminate build output with new line
		glog.Info()
	}(outReader)

	go dockerpkg.StreamContainerIO(errReader, &errOutput, func(a ...interface{}) { glog.Info(a...) })

	err := builder.docker.RunContainer(opts)
	if e, ok := err.(errors.ContainerError); ok {
		// even with deferred close above, close errReader now so we avoid data race condition on errOutput;
		// closing will cause StreamContainerIO to exit, thus releasing the writer in the equation
		errReader.Close()
		return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
	}
	// Do not wait for source input if the container times out.
	// FIXME: this potentially leaks a goroutine.
	if !util.IsTimeoutError(err) {
		wg.Wait()
	}
	return err
}
func (step *startRuntimeImageAndUploadFilesStep) execute(ctx *postExecutorStepContext) error { glog.V(3).Info("Executing step: start runtime image and upload files") fd, err := ioutil.TempFile("", "s2i-upload-done") if err != nil { return err } fd.Close() lastFilePath := fd.Name() defer func() { os.Remove(lastFilePath) }() lastFileDstPath := "/tmp/" + filepath.Base(lastFilePath) outReader, outWriter := io.Pipe() errReader, errWriter := io.Pipe() artifactsDir := filepath.Join(step.builder.config.WorkingDir, api.RuntimeArtifactsDir) // We copy scripts to a directory with artifacts to upload files in one shot for _, script := range []string{api.AssembleRuntime, api.Run} { // scripts must be inside of "scripts" subdir, see createCommandForExecutingRunScript() destinationDir := filepath.Join(artifactsDir, "scripts") err = step.copyScriptIfNeeded(script, destinationDir) if err != nil { return err } } image := step.builder.config.RuntimeImage workDir, err := step.docker.GetImageWorkdir(image) if err != nil { return fmt.Errorf("could not get working dir of %q image: %v", image, err) } commandBaseDir := filepath.Join(workDir, "scripts") useExternalAssembleScript := step.builder.externalScripts[api.AssembleRuntime] if !useExternalAssembleScript { // script already inside of the image var scriptsURL string scriptsURL, err = step.docker.GetScriptsURL(image) if err != nil { return err } if len(scriptsURL) == 0 { return fmt.Errorf("could not determine scripts URL for image %q", image) } commandBaseDir = strings.TrimPrefix(scriptsURL, "image://") } cmd := fmt.Sprintf( "while [ ! 
-f %q ]; do sleep 0.5; done; %s/%s; exit $?", lastFileDstPath, commandBaseDir, api.AssembleRuntime, ) opts := dockerpkg.RunContainerOptions{ Image: image, PullImage: false, // The PullImage is false because we've already pulled the image CommandExplicit: []string{"/bin/sh", "-c", cmd}, Stdout: outWriter, Stderr: errWriter, NetworkMode: string(step.builder.config.DockerNetworkMode), CGroupLimits: step.builder.config.CGroupLimits, CapDrop: step.builder.config.DropCapabilities, PostExec: step.builder.postExecutor, Env: step.builder.env, } opts.OnStart = func(containerID string) error { setStandardPerms := func(writer io.Writer) s2itar.Writer { return s2itar.ChmodAdapter{Writer: tar.NewWriter(writer), NewFileMode: 0644, NewExecFileMode: 0755, NewDirMode: 0755} } glog.V(5).Infof("Uploading directory %q -> %q", artifactsDir, workDir) onStartErr := step.docker.UploadToContainerWithTarWriter(step.fs, artifactsDir, workDir, containerID, setStandardPerms) if onStartErr != nil { return fmt.Errorf("could not upload directory (%q -> %q) into container %s: %v", artifactsDir, workDir, containerID, err) } glog.V(5).Infof("Uploading file %q -> %q", lastFilePath, lastFileDstPath) onStartErr = step.docker.UploadToContainerWithTarWriter(step.fs, lastFilePath, lastFileDstPath, containerID, setStandardPerms) if onStartErr != nil { return fmt.Errorf("could not upload file (%q -> %q) into container %s: %v", lastFilePath, lastFileDstPath, containerID, err) } return onStartErr } dockerpkg.StreamContainerIO(outReader, nil, func(s string) { glog.V(0).Info(s) }) errOutput := "" c := dockerpkg.StreamContainerIO(errReader, &errOutput, func(s string) { glog.Info(s) }) // switch to the next stage of post executors steps step.builder.postExecutorStage++ err = step.docker.RunContainer(opts) if e, ok := err.(s2ierr.ContainerError); ok { // Must wait for StreamContainerIO goroutine above to exit before reading errOutput. <-c err = s2ierr.NewContainerError(image, e.ErrorCode, errOutput) } return err }
func (step *startRuntimeImageAndUploadFilesStep) execute(ctx *postExecutorStepContext) error { glog.V(3).Info("Executing step: start runtime image and upload files") fd, err := ioutil.TempFile("", "s2i-upload-done") if err != nil { return err } fd.Close() lastFilePath := fd.Name() defer func() { os.Remove(lastFilePath) }() lastFileDstPath := "/tmp/" + filepath.Base(lastFilePath) outReader, outWriter := io.Pipe() defer outReader.Close() defer outWriter.Close() errReader, errWriter := io.Pipe() defer errReader.Close() defer errWriter.Close() artifactsDir := filepath.Join(step.builder.config.WorkingDir, api.RuntimeArtifactsDir) // We copy scripts to a directory with artifacts to upload files in one shot for _, script := range []string{api.AssembleRuntime, api.Run} { // scripts must be inside of "scripts" subdir, see createCommandForExecutingRunScript() destinationDir := filepath.Join(artifactsDir, "scripts") if err := step.copyScriptIfNeeded(script, destinationDir); err != nil { return err } } image := step.builder.config.RuntimeImage workDir, err := step.docker.GetImageWorkdir(image) if err != nil { return fmt.Errorf("Couldn't get working dir of %q image: %v", image, err) } commandBaseDir := filepath.Join(workDir, "scripts") useExternalAssembleScript := step.builder.externalScripts[api.AssembleRuntime] if !useExternalAssembleScript { // script already inside of the image scriptsURL, err := step.docker.GetScriptsURL(image) if err != nil { return err } if len(scriptsURL) == 0 { return fmt.Errorf("Couldn't determine scripts URL for image %q", image) } commandBaseDir = strings.TrimPrefix(scriptsURL, "image://") } cmd := fmt.Sprintf( "while [ ! 
-f %q ]; do sleep 0.5; done; %s/%s; exit $?", lastFileDstPath, commandBaseDir, api.AssembleRuntime, ) opts := dockerpkg.RunContainerOptions{ Image: image, Entrypoint: DefaultEntrypoint, PullImage: false, // The PullImage is false because we've already pulled the image CommandExplicit: []string{"/bin/sh", "-c", cmd}, Stdout: outWriter, Stderr: errWriter, NetworkMode: string(step.builder.config.DockerNetworkMode), CGroupLimits: step.builder.config.CGroupLimits, CapDrop: step.builder.config.DropCapabilities, PostExec: step.builder.postExecutor, Env: step.builder.env, } opts.OnStart = func(containerID string) error { setStandardPerms := func(path string, info os.FileInfo, err error) error { if err != nil { return err } // chmod does nothing on windows anyway. if runtime.GOOS == "windows" { return nil } // Skip chmod for symlinks if info.Mode()&os.ModeSymlink != 0 { return nil } // file should be writable by owner (u=w) and readable by other users (a=r), // executable bit should be left as is mode := os.FileMode(0644) // syscall.S_IEXEC == 0x40 but we can't reference the constant if we want // to build releases for windows. if info.IsDir() || info.Mode()&0x40 != 0 { mode = 0755 } return step.fs.Chmod(path, mode) } glog.V(5).Infof("Uploading directory %q -> %q", artifactsDir, workDir) if err := step.docker.UploadToContainerWithCallback(artifactsDir, workDir, containerID, setStandardPerms, true); err != nil { return fmt.Errorf("Couldn't upload directory (%q -> %q) into container %s: %v", artifactsDir, workDir, containerID, err) } glog.V(5).Infof("Uploading file %q -> %q", lastFilePath, lastFileDstPath) if err := step.docker.UploadToContainerWithCallback(lastFilePath, lastFileDstPath, containerID, setStandardPerms, true); err != nil { return fmt.Errorf("Couldn't upload file (%q -> %q) into container %s: %v", lastFilePath, lastFileDstPath, containerID, err) } return err } go dockerpkg.StreamContainerIO(outReader, nil, func(a ...interface{}) { glog.V(0).Info(a...) 
}) errOutput := "" go dockerpkg.StreamContainerIO(errReader, &errOutput, func(a ...interface{}) { glog.Info(a...) }) // switch to the next stage of post executors steps step.builder.postExecutorStage++ err = step.docker.RunContainer(opts) if e, ok := err.(errors.ContainerError); ok { return errors.NewContainerError(image, e.ErrorCode, errOutput) } return nil }
// Execute runs the specified STI script in the builder image.
//
// command is the script name (e.g. api.Assemble), user is the user the
// container runs as (may be empty), and config supplies the image name,
// injections, volumes and other run options. Returns the error from the
// container run, wrapped as a ContainerError with captured stderr when the
// container itself failed.
func (builder *STI) Execute(command string, user string, config *api.Config) error {
	glog.V(2).Infof("Using image name %s", config.BuilderImage)

	// we can't invoke this method before (for example in New() method)
	// because of later initialization of config.WorkingDir
	builder.env = createBuildEnvironment(config)

	// errOutput accumulates the container's stderr via StreamContainerIO so it
	// can be attached to a ContainerError below.
	errOutput := ""
	outReader, outWriter := io.Pipe()
	errReader, errWriter := io.Pipe()
	externalScripts := builder.externalScripts[command]
	// if LayeredBuild is called then all the scripts will be placed inside the image
	if config.LayeredBuild {
		externalScripts = false
	}

	opts := dockerpkg.RunContainerOptions{
		Image:  config.BuilderImage,
		Stdout: outWriter,
		Stderr: errWriter,
		// The PullImage is false because the PullImage function should be called
		// before we run the container
		PullImage:       false,
		ExternalScripts: externalScripts,
		ScriptsURL:      config.ScriptsURL,
		Destination:     config.Destination,
		Command:         command,
		Env:             builder.env,
		User:            user,
		PostExec:        builder.postExecutor,
		NetworkMode:     string(config.DockerNetworkMode),
		CGroupLimits:    config.CGroupLimits,
		CapDrop:         config.DropCapabilities,
		Binds:           config.BuildVolumes.AsBinds(),
	}

	// If there are injections specified, override the original assemble script
	// and wait till all injections are uploaded into the container that runs the
	// assemble script.
	// injectionError is closed (possibly after receiving one error) on every
	// path, so the source-upload goroutine below never blocks forever.
	injectionError := make(chan error)
	if len(config.Injections) > 0 && command == api.Assemble {
		workdir, err := builder.docker.GetImageWorkdir(config.BuilderImage)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonGenericS2IBuildFailed,
				utilstatus.ReasonMessageGenericS2iBuildFailed,
			)
			return err
		}
		config.Injections = util.FixInjectionsWithRelativePath(workdir, config.Injections)
		injectedFiles, err := util.ExpandInjectedFiles(builder.fs, config.Injections)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonInstallScriptsFailed,
				utilstatus.ReasonMessageInstallScriptsFailed,
			)
			return err
		}
		// Script that removes the injected files from the container once the
		// overridden command has finished, so secrets don't persist in layers.
		rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections")
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonGenericS2IBuildFailed,
				utilstatus.ReasonMessageGenericS2iBuildFailed,
			)
			return err
		}
		defer os.Remove(rmScript)
		// Wait until the removal script has been uploaded (OnStart below), run
		// the real command, then source the removal script, preserving the
		// real command's exit code.
		opts.CommandOverrides = func(cmd string) string {
			return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
				"/tmp/rm-injections", cmd)
		}
		// Chain any previously-installed OnStart hook after the uploads.
		originalOnStart := opts.OnStart
		opts.OnStart = func(containerID string) error {
			defer close(injectionError)
			glog.V(2).Info("starting the injections uploading ...")
			for _, s := range config.Injections {
				if err := builder.docker.UploadToContainer(builder.fs, s.Source, s.Destination, containerID); err != nil {
					injectionError <- util.HandleInjectionError(s, err)
					return err
				}
			}
			if err := builder.docker.UploadToContainer(builder.fs, rmScript, "/tmp/rm-injections", containerID); err != nil {
				injectionError <- util.HandleInjectionError(api.VolumeSpec{Source: rmScript, Destination: "/tmp/rm-injections"}, err)
				return err
			}
			if originalOnStart != nil {
				return originalOnStart(containerID)
			}
			return nil
		}
	} else {
		close(injectionError)
	}

	if !config.LayeredBuild {
		// Stream the upload directory as a tar archive into the container's stdin.
		r, w := io.Pipe()
		opts.Stdin = r

		go func() {
			// Wait for the injections to complete and check the error. Do not start
			// streaming the sources when the injection failed.
			if <-injectionError != nil {
				w.Close()
				return
			}
			glog.V(2).Info("starting the source uploading ...")
			uploadDir := filepath.Join(config.WorkingDir, "upload")
			w.CloseWithError(builder.tar.CreateTarStream(uploadDir, false, w))
		}()
	}

	// Relay container stdout to the log unless the build is quiet.
	dockerpkg.StreamContainerIO(outReader, nil, func(s string) {
		if !config.Quiet {
			glog.Info(strings.TrimSpace(s))
		}
	})

	// Capture stderr into errOutput while also logging it; c is closed when
	// the stderr stream ends.
	c := dockerpkg.StreamContainerIO(errReader, &errOutput, func(s string) { glog.Info(s) })

	err := builder.docker.RunContainer(opts)
	if e, ok := err.(s2ierr.ContainerError); ok {
		// Must wait for StreamContainerIO goroutine above to exit before reading errOutput.
		<-c
		err = s2ierr.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
	}
	return err
}
// Save extracts and restores the build artifacts from the previous build to a // current build. func (builder *STI) Save(config *api.Config) (err error) { artifactTmpDir := filepath.Join(config.WorkingDir, "upload", "artifacts") if builder.result == nil { builder.result = &api.Result{} } if err = builder.fs.Mkdir(artifactTmpDir); err != nil { builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonFSOperationFailed, utilstatus.ReasonMessageFSOperationFailed, ) return err } image := firstNonEmpty(config.IncrementalFromTag, config.Tag) outReader, outWriter := io.Pipe() errReader, errWriter := io.Pipe() glog.V(1).Infof("Saving build artifacts from image %s to path %s", image, artifactTmpDir) extractFunc := func(string) error { extractErr := builder.tar.ExtractTarStream(artifactTmpDir, outReader) io.Copy(ioutil.Discard, outReader) // must ensure reader from container is drained return extractErr } user := config.AssembleUser if len(user) == 0 { user, err = builder.docker.GetImageUser(image) if err != nil { builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed, ) return err } glog.V(3).Infof("The assemble user is not set, defaulting to %q user", user) } else { glog.V(3).Infof("Using assemble user %q to extract artifacts", user) } opts := dockerpkg.RunContainerOptions{ Image: image, User: user, ExternalScripts: builder.externalScripts[api.SaveArtifacts], ScriptsURL: config.ScriptsURL, Destination: config.Destination, PullImage: false, Command: api.SaveArtifacts, Stdout: outWriter, Stderr: errWriter, OnStart: extractFunc, NetworkMode: string(config.DockerNetworkMode), CGroupLimits: config.CGroupLimits, CapDrop: config.DropCapabilities, } dockerpkg.StreamContainerIO(errReader, nil, func(s string) { glog.Info(s) }) err = builder.docker.RunContainer(opts) if e, ok := err.(s2ierr.ContainerError); ok { err = s2ierr.NewSaveArtifactsError(image, 
e.Output, err) } builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason( utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed, ) return err }