func TestSaveArtifactsRunError(t *testing.T) {
    tests := []error{
        fmt.Errorf("Run error"),
        stierr.NewContainerError("", -1, ""),
    }
    expected := []error{
        tests[0],
        stierr.NewSaveArtifactsError("", "", tests[1]),
    }
    // test with and without a tar extract error
    tarError := []bool{true, false}
    for i := range tests {
        for _, te := range tarError {
            bh := testBuildHandler()
            fd := bh.docker.(*docker.FakeDocker)
            th := bh.tar.(*test.FakeTar)
            fd.RunContainerError = tests[i]
            if te {
                th.ExtractTarError = fmt.Errorf("tar error")
            }
            err := bh.Save(bh.config)
            if !te && err != expected[i] {
                t.Errorf("Unexpected error returned from saveArtifacts: %v", err)
            } else if te && err != th.ExtractTarError {
                t.Errorf("Expected tar error. Got %v", err)
            }
        }
    }
}
// Run invokes the Docker API to run the resulting s2i image in a container;
// the container's stdout and stderr are redirected to glog.
func (b *DockerRunner) Run(config *api.Config) error {
    glog.V(4).Infof("Attempting to run image %s\n", config.Tag)

    errOutput := ""
    outReader, outWriter := io.Pipe()
    errReader, errWriter := io.Pipe()
    defer errReader.Close()
    defer errWriter.Close()
    defer outReader.Close()
    defer outWriter.Close()
    opts := docker.RunContainerOptions{
        Image:        config.Tag,
        Stdout:       outWriter,
        Stderr:       errWriter,
        TargetImage:  true,
        CGroupLimits: config.CGroupLimits,
    }

    // NOTE: we've seen some Golang-level deadlock issues with streaming cmd
    // output to glog, but part of the deadlock seems to have occurred when
    // stdout was "silent" and produced no data, such as when we would do a
    // git clone with the --quiet option. We have not seen the hang when the
    // cmd produces output to stdout.
    // Capture stderr into errOutput so a ContainerError can report it.
    go docker.StreamContainerIO(errReader, &errOutput, glog.Error)
    go docker.StreamContainerIO(outReader, nil, glog.Info)

    rerr := b.ContainerClient.RunContainer(opts)
    if e, ok := rerr.(errors.ContainerError); ok {
        return errors.NewContainerError(config.Tag, e.ErrorCode, errOutput)
    }
    // Propagate non-ContainerError failures instead of swallowing them.
    return rerr
}
// runContainerWait abstracts out the waiting on the container hosting the
// builder function; this logic was originally inline in the RunContainer()
// method.
func runContainerWait(wg *sync.WaitGroup, d *stiDocker, container *docker.Container) error {
    glog.V(2).Infof("Waiting for container")

    exitCode, err := d.client.WaitContainer(container.ID)
    glog.V(2).Infof("Container wait returns with %d and %v\n", exitCode, err)
    wg.Wait()
    if err != nil {
        return err
    }

    glog.V(2).Infof("Container exited")

    if exitCode != 0 {
        return errors.NewContainerError(container.Name, exitCode, "")
    }
    return nil
}
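// The snippet below is a minimal, self-contained sketch (not part of the
// original sources) of the wait pattern runContainerWait relies on: the exit
// code is fetched first, then wg.Wait() drains the attach/streaming
// goroutines before the exit code is acted upon. All names are illustrative.
package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    go func() { // stand-in for a goroutine streaming container output
        defer wg.Done()
        fmt.Println("streaming container output ...")
    }()

    exitCode := 0 // stand-in for d.client.WaitContainer(container.ID)
    wg.Wait()     // drain the streams before acting on the exit code
    if exitCode != 0 {
        fmt.Println("container failed with exit code", exitCode)
    }
}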
func TestLayeredBuild(t *testing.T) {
    fh := &FakeSTI{
        BuildRequest: &api.Config{
            BuilderImage: "testimage",
        },
        BuildResult:   &api.Result{},
        ExecuteError:  stierr.NewContainerError("", 1, `/bin/sh: tar: not found`),
        ExpectedError: true,
    }
    builder := newFakeSTI(fh)
    builder.Build(&api.Config{BuilderImage: "testimage"})
    // Verify layered build
    if !fh.LayeredBuildCalled {
        t.Errorf("Layered build was not called.")
    }
}
// Run invokes the Docker API to run the image defined in config as a new
// container. The container's stdout and stderr will be logged with glog.
func (b *DockerRunner) Run(config *api.Config) error {
    glog.V(4).Infof("Attempting to run image %s\n", config.Tag)

    outReader, outWriter := io.Pipe()
    defer outReader.Close()
    defer outWriter.Close()

    errReader, errWriter := io.Pipe()
    defer errReader.Close()
    defer errWriter.Close()

    opts := docker.RunContainerOptions{
        Image:        config.Tag,
        Entrypoint:   sti.DefaultEntrypoint,
        Stdout:       outWriter,
        Stderr:       errWriter,
        TargetImage:  true,
        CGroupLimits: config.CGroupLimits,
        CapDrop:      config.DropCapabilities,
    }

    // NOTE: we've seen some Golang-level deadlock issues with streaming cmd
    // output to glog, but part of the deadlock seems to have occurred when
    // stdout was "silent" and produced no data, such as when we would do a
    // git clone with the --quiet option. We have not seen the hang when the
    // cmd produces output to stdout.
    go docker.StreamContainerIO(errReader, nil, glog.Error)
    go docker.StreamContainerIO(outReader, nil, glog.Info)

    err := b.ContainerClient.RunContainer(opts)

    // If we get a ContainerError, the original message reports the
    // container name. The container is temporary and its name is
    // meaningless, therefore we make the error message more helpful by
    // replacing the container name with the image tag.
    if e, ok := err.(errors.ContainerError); ok {
        return errors.NewContainerError(config.Tag, e.ErrorCode, e.Output)
    }
    return err
}
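// A hypothetical, self-contained sketch (not the project's actual types) of
// the error-rewriting idea used in Run above: a ContainerError naming the
// throwaway container is replaced by one naming the image tag, which is the
// only identifier meaningful to the user.
package main

import "fmt"

type containerError struct {
    name      string
    errorCode int
}

func (e containerError) Error() string {
    return fmt.Sprintf("%s exited with code %d", e.name, e.errorCode)
}

func main() {
    var err error = containerError{name: "festive_wilson", errorCode: 1} // meaningless temp name
    if e, ok := err.(containerError); ok {
        err = containerError{name: "myapp:latest", errorCode: e.errorCode} // report the image tag instead
    }
    fmt.Println(err)
}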
// Execute runs the specified STI script in the builder image.
func (b *STI) Execute(command string, user string, config *api.Config) error {
    glog.V(2).Infof("Using image name %s", config.BuilderImage)

    env, err := scripts.GetEnvironment(config)
    if err != nil {
        glog.V(1).Infof("No user environment provided (%v)", err)
    }

    buildEnv := append(scripts.ConvertEnvironment(env), b.generateConfigEnv()...)

    errOutput := ""
    outReader, outWriter := io.Pipe()
    errReader, errWriter := io.Pipe()
    defer outReader.Close()
    defer outWriter.Close()
    defer errReader.Close()
    defer errWriter.Close()
    externalScripts := b.externalScripts[command]
    // if LayeredBuild is called, all the scripts will be placed inside the image
    if config.LayeredBuild {
        externalScripts = false
    }

    opts := dockerpkg.RunContainerOptions{
        Image:  config.BuilderImage,
        Stdout: outWriter,
        Stderr: errWriter,
        // PullImage is false because the image should already have been
        // pulled before we run the container.
        PullImage:       false,
        ExternalScripts: externalScripts,
        ScriptsURL:      config.ScriptsURL,
        Destination:     config.Destination,
        Command:         command,
        Env:             buildEnv,
        User:            user,
        PostExec:        b.postExecutor,
        NetworkMode:     string(config.DockerNetworkMode),
        CGroupLimits:    config.CGroupLimits,
        CapDrop:         config.DropCapabilities,
    }

    // If there are injections specified, override the original assemble
    // script and wait until all injections are uploaded into the container
    // that runs the assemble script.
    injectionComplete := make(chan struct{})
    var injectionError error
    if len(config.Injections) > 0 && command == api.Assemble {
        workdir, err := b.docker.GetImageWorkdir(config.BuilderImage)
        if err != nil {
            return err
        }
        util.FixInjectionsWithRelativePath(workdir, &config.Injections)
        injectedFiles, err := util.ExpandInjectedFiles(config.Injections)
        if err != nil {
            return err
        }
        rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections")
        if err != nil {
            return err
        }
        defer os.Remove(rmScript)
        opts.CommandOverrides = func(cmd string) string {
            return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
                "/tmp/rm-injections", cmd)
        }
        originalOnStart := opts.OnStart
        opts.OnStart = func(containerID string) error {
            defer close(injectionComplete)
            if err != nil {
                injectionError = err
                return err
            }
            glog.V(2).Info("starting the injections uploading ...")
            for _, s := range config.Injections {
                if err := b.docker.UploadToContainer(s.SourcePath, s.DestinationDir, containerID); err != nil {
                    injectionError = util.HandleInjectionError(s, err)
                    return err
                }
            }
            if err := b.docker.UploadToContainer(rmScript, "/tmp/rm-injections", containerID); err != nil {
                injectionError = util.HandleInjectionError(api.InjectPath{SourcePath: rmScript, DestinationDir: "/tmp/rm-injections"}, err)
                return err
            }
            if originalOnStart != nil {
                return originalOnStart(containerID)
            }
            return nil
        }
    } else {
        close(injectionComplete)
    }

    wg := sync.WaitGroup{}
    if !config.LayeredBuild {
        wg.Add(1)
        uploadDir := filepath.Join(config.WorkingDir, "upload")
        // TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
        r, w := io.Pipe()
        go func() {
            // Wait for the injections to complete and check the error. Do
            // not start streaming the sources when the injection failed.
            <-injectionComplete
            if injectionError != nil {
                wg.Done()
                return
            }
            glog.V(2).Info("starting the source uploading ...")
            var err error
            defer func() {
                w.CloseWithError(err)
                if r := recover(); r != nil {
                    glog.Errorf("recovered panic: %#v", r)
                }
                wg.Done()
            }()
            err = b.tar.CreateTarStream(uploadDir, false, w)
        }()
        opts.Stdin = r
        defer wg.Wait()
    }

    go func(reader io.Reader) {
        scanner := bufio.NewReader(reader)
        for {
            text, err := scanner.ReadString('\n')
            if err != nil {
                // we're ignoring ErrClosedPipe, as it only signals that the
                // docker container ended streaming logs
                if glog.V(2) && err != io.ErrClosedPipe {
                    glog.Errorf("Error reading docker stdout, %v", err)
                }
                break
            }
            // Nothing is printed when the quiet option is set
            if config.Quiet {
                continue
            }
            // Log level 3 or higher forces use of glog instead of printing to stdout
            if glog.V(3) {
                glog.Info(text)
                continue
            }
            fmt.Fprintf(os.Stdout, "%s\n", strings.TrimSpace(text))
        }
    }(outReader)
    go dockerpkg.StreamContainerIO(errReader, &errOutput, glog.Error)

    err = b.docker.RunContainer(opts)
    if util.IsTimeoutError(err) {
        // Cancel waiting for source input if the container times out
        wg.Done()
    }
    if e, ok := err.(errors.ContainerError); ok {
        return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
    }
    return err
}
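// Self-contained sketch (illustrative input, real stdlib calls) of the
// stdout-draining loop used in Execute above: read line by line until the
// pipe reports an error, treating a closed pipe as normal termination.
package main

import (
    "bufio"
    "fmt"
    "io"
    "strings"
)

func main() {
    scanner := bufio.NewReader(strings.NewReader("assemble: installing dependencies\nassemble: done\n"))
    for {
        text, err := scanner.ReadString('\n')
        if err != nil {
            if err != io.EOF && err != io.ErrClosedPipe {
                fmt.Println("read error:", err)
            }
            break
        }
        fmt.Println(strings.TrimSpace(text))
    }
}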
// Execute runs the specified STI script in the builder image.
func (builder *STI) Execute(command string, user string, config *api.Config) error {
    glog.V(2).Infof("Using image name %s", config.BuilderImage)

    // we can't invoke this method earlier (for example in the New() method)
    // because config.WorkingDir is initialized later
    builder.env = createBuildEnvironment(config)

    errOutput := ""
    outReader, outWriter := io.Pipe()
    errReader, errWriter := io.Pipe()
    defer outReader.Close()
    defer outWriter.Close()
    defer errReader.Close()
    defer errWriter.Close()
    externalScripts := builder.externalScripts[command]
    // if LayeredBuild is called, all the scripts will be placed inside the image
    if config.LayeredBuild {
        externalScripts = false
    }

    opts := dockerpkg.RunContainerOptions{
        Image:  config.BuilderImage,
        Stdout: outWriter,
        Stderr: errWriter,
        // PullImage is false because the image should already have been
        // pulled before we run the container.
        PullImage:       false,
        ExternalScripts: externalScripts,
        ScriptsURL:      config.ScriptsURL,
        Destination:     config.Destination,
        Command:         command,
        Env:             builder.env,
        User:            user,
        PostExec:        builder.postExecutor,
        NetworkMode:     string(config.DockerNetworkMode),
        CGroupLimits:    config.CGroupLimits,
        CapDrop:         config.DropCapabilities,
        Binds:           config.BuildVolumes.AsBinds(),
    }

    // If there are injections specified, override the original assemble
    // script and wait until all injections are uploaded into the container
    // that runs the assemble script.
    injectionComplete := make(chan struct{})
    var injectionError error
    if len(config.Injections) > 0 && command == api.Assemble {
        workdir, err := builder.docker.GetImageWorkdir(config.BuilderImage)
        if err != nil {
            builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
            return err
        }
        config.Injections = util.FixInjectionsWithRelativePath(workdir, config.Injections)
        injectedFiles, err := util.ExpandInjectedFiles(config.Injections)
        if err != nil {
            builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonInstallScriptsFailed, utilstatus.ReasonMessageInstallScriptsFailed)
            return err
        }
        rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections")
        if err != nil {
            builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
            return err
        }
        defer os.Remove(rmScript)
        opts.CommandOverrides = func(cmd string) string {
            return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
                "/tmp/rm-injections", cmd)
        }
        originalOnStart := opts.OnStart
        opts.OnStart = func(containerID string) error {
            defer close(injectionComplete)
            if err != nil {
                injectionError = err
                return err
            }
            glog.V(2).Info("starting the injections uploading ...")
            for _, s := range config.Injections {
                if err := builder.docker.UploadToContainer(s.Source, s.Destination, containerID); err != nil {
                    injectionError = util.HandleInjectionError(s, err)
                    return err
                }
            }
            if err := builder.docker.UploadToContainer(rmScript, "/tmp/rm-injections", containerID); err != nil {
                injectionError = util.HandleInjectionError(api.VolumeSpec{Source: rmScript, Destination: "/tmp/rm-injections"}, err)
                return err
            }
            if originalOnStart != nil {
                return originalOnStart(containerID)
            }
            return nil
        }
    } else {
        close(injectionComplete)
    }

    wg := sync.WaitGroup{}
    if !config.LayeredBuild {
        wg.Add(1)
        uploadDir := filepath.Join(config.WorkingDir, "upload")
        // TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
        r, w := io.Pipe()
        go func() {
            // reminder: deferred functions run in LIFO (stack) order
            defer wg.Done()
            // Wait for the injections to complete and check the error. Do
            // not start streaming the sources when the injection failed.
            <-injectionComplete
            if injectionError != nil {
                return
            }
            glog.V(2).Info("starting the source uploading ...")
            var err error
            defer func() {
                w.CloseWithError(err)
                if r := recover(); r != nil {
                    glog.Errorf("recovered panic: %#v", r)
                }
            }()
            err = builder.tar.CreateTarStream(uploadDir, false, w)
        }()
        opts.Stdin = r
    }

    go func(reader io.Reader) {
        scanner := bufio.NewReader(reader)
        // Precede build output with a newline
        glog.Info()
        for {
            text, err := scanner.ReadString('\n')
            if err != nil {
                // we're ignoring ErrClosedPipe, as it only signals that the
                // docker container ended streaming logs
                if glog.Is(2) && err != io.ErrClosedPipe && err != io.EOF {
                    glog.Errorf("Error reading docker stdout, %#v", err)
                }
                break
            }
            // Nothing is printed when the quiet option is set
            if config.Quiet {
                continue
            }
            glog.Info(strings.TrimSpace(text))
        }
        // Terminate build output with a newline
        glog.Info()
    }(outReader)
    go dockerpkg.StreamContainerIO(errReader, &errOutput, func(a ...interface{}) { glog.Info(a...) })

    err := builder.docker.RunContainer(opts)
    if e, ok := err.(errors.ContainerError); ok {
        // Even with the deferred Close above, close errReader now to avoid a
        // data race on errOutput; closing causes StreamContainerIO to exit,
        // releasing the writer side.
        errReader.Close()
        return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
    }

    // Do not wait for source input if the container times out.
    // FIXME: this potentially leaks a goroutine.
    if !util.IsTimeoutError(err) {
        wg.Wait()
    }
    return err
}
// RunContainer creates and starts a container using the image specified in
// the options, with the ability to stream input or output.
func (d *stiDocker) RunContainer(opts RunContainerOptions) (err error) {
    // get info about the specified image
    image := getImageName(opts.Image)
    var imageMetadata *docker.Image
    if opts.PullImage {
        imageMetadata, err = d.CheckAndPullImage(image)
    } else {
        imageMetadata, err = d.client.InspectImage(image)
    }
    if err != nil {
        glog.Errorf("Unable to get image metadata for %s: %v", image, err)
        return err
    }

    // base directory for all STI commands
    var commandBaseDir string
    // untar operation destination directory
    tarDestination := opts.Destination
    if len(tarDestination) == 0 {
        tarDestination = getDestination(imageMetadata)
    }
    if opts.ExternalScripts {
        // for external scripts we must always append 'scripts' because this
        // is the default subdirectory inside the tar for them
        commandBaseDir = filepath.Join(tarDestination, "scripts")
        glog.V(2).Infof("Both scripts and untarred source will be placed in '%s'", tarDestination)
    } else {
        // for internal scripts we can have separate paths for the scripts
        // and the untar operation destination
        scriptsURL := opts.ScriptsURL
        if len(scriptsURL) == 0 {
            scriptsURL = getScriptsURL(imageMetadata)
        }
        commandBaseDir = strings.TrimPrefix(scriptsURL, "image://")
        glog.V(2).Infof("Base directory for STI scripts is '%s'. Untarring destination is '%s'.",
            commandBaseDir, tarDestination)
    }

    cmd := []string{filepath.Join(commandBaseDir, string(opts.Command))}
    // when calling the assemble script with the Stdin parameter set (the tar
    // file), we need to first untar the whole archive and only then call the
    // assemble script
    if opts.Stdin != nil && (opts.Command == api.Assemble || opts.Command == api.Usage) {
        cmd = []string{"/bin/sh", "-c", fmt.Sprintf("tar -C %s -xf - && %s",
            tarDestination, filepath.Join(commandBaseDir, string(opts.Command)))}
    }
    config := docker.Config{
        Image: image,
        Cmd:   cmd,
    }

    if opts.Env != nil {
        config.Env = opts.Env
    }
    if opts.Stdin != nil {
        config.OpenStdin = true
        config.StdinOnce = true
    }
    if opts.Stdout != nil {
        config.AttachStdout = true
    }

    glog.V(2).Infof("Creating container using config: %+v", config)
    container, err := d.client.CreateContainer(docker.CreateContainerOptions{Name: "", Config: &config})
    if err != nil {
        return err
    }
    defer d.RemoveContainer(container.ID)

    glog.V(2).Infof("Attaching to container")
    attached := make(chan struct{})
    attachOpts := docker.AttachToContainerOptions{
        Container: container.ID,
        Success:   attached,
        Stream:    true,
    }
    if opts.Stdin != nil {
        attachOpts.InputStream = opts.Stdin
        attachOpts.Stdin = true
    } else if opts.Stdout != nil {
        attachOpts.OutputStream = opts.Stdout
        attachOpts.Stdout = true
    }
    if opts.Stderr != nil {
        attachOpts.ErrorStream = opts.Stderr
        attachOpts.Stderr = true
    }

    wg := sync.WaitGroup{}
    wg.Add(1) // Add before starting the goroutine so wg.Wait() cannot race it
    go func() {
        defer wg.Done()
        if err := d.client.AttachToContainer(attachOpts); err != nil {
            glog.Errorf("Unable to attach to container with %v", attachOpts)
        }
    }()
    // AttachToContainer signals on Success once it has attached; echo the
    // value back to acknowledge, as go-dockerclient requires.
    attached <- <-attached

    // If attaching both stdin and stdout or stderr, attach stdout and stderr
    // in a second goroutine.
    // TODO: remove this goroutine once docker 1.4 is in broad usage,
    // see: https://github.com/docker/docker/commit/f936a10d8048f471d115978472006e1b58a7c67d
    if opts.Stdin != nil && opts.Stdout != nil {
        attached2 := make(chan struct{})
        attachOpts2 := docker.AttachToContainerOptions{
            Container:    container.ID,
            Success:      attached2,
            Stream:       true,
            OutputStream: opts.Stdout,
            Stdout:       true,
        }
        if opts.Stderr != nil {
            attachOpts2.Stderr = true
            attachOpts2.ErrorStream = opts.Stderr
        }
        wg.Add(1) // Add before starting the goroutine so wg.Wait() cannot race it
        go func() {
            defer wg.Done()
            if err := d.client.AttachToContainer(attachOpts2); err != nil {
                glog.Errorf("Unable to attach to container with %v", attachOpts2)
            }
        }()
        attached2 <- <-attached2
    }

    glog.V(2).Infof("Starting container")
    if err = d.client.StartContainer(container.ID, nil); err != nil {
        return err
    }
    if opts.OnStart != nil {
        if err = opts.OnStart(); err != nil {
            return err
        }
    }

    glog.V(2).Infof("Waiting for container")
    exitCode, err := d.client.WaitContainer(container.ID)
    wg.Wait()
    if err != nil {
        return err
    }
    glog.V(2).Infof("Container exited")

    if exitCode != 0 {
        return errors.NewContainerError(container.Name, exitCode, "")
    }

    if opts.PostExec != nil {
        glog.V(2).Infof("Invoking postExecution function")
        if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
            return err
        }
    }
    return nil
}
// Execute runs the specified STI script in the builder image.
func (builder *STI) Execute(command string, user string, config *api.Config) error {
    glog.V(2).Infof("Using image name %s", config.BuilderImage)

    // we can't invoke this method earlier (for example in the New() method)
    // because config.WorkingDir is initialized later
    builder.env = createBuildEnvironment(config)

    errOutput := ""
    outReader, outWriter := io.Pipe()
    errReader, errWriter := io.Pipe()
    externalScripts := builder.externalScripts[command]
    // if LayeredBuild is called, all the scripts will be placed inside the image
    if config.LayeredBuild {
        externalScripts = false
    }

    opts := dockerpkg.RunContainerOptions{
        Image:  config.BuilderImage,
        Stdout: outWriter,
        Stderr: errWriter,
        // PullImage is false because the image should already have been
        // pulled before we run the container.
        PullImage:       false,
        ExternalScripts: externalScripts,
        ScriptsURL:      config.ScriptsURL,
        Destination:     config.Destination,
        Command:         command,
        Env:             builder.env,
        User:            user,
        PostExec:        builder.postExecutor,
        NetworkMode:     string(config.DockerNetworkMode),
        CGroupLimits:    config.CGroupLimits,
        CapDrop:         config.DropCapabilities,
        Binds:           config.BuildVolumes.AsBinds(),
    }

    // If there are injections specified, override the original assemble
    // script and wait until all injections are uploaded into the container
    // that runs the assemble script.
    injectionError := make(chan error)
    if len(config.Injections) > 0 && command == api.Assemble {
        workdir, err := builder.docker.GetImageWorkdir(config.BuilderImage)
        if err != nil {
            builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
                utilstatus.ReasonGenericS2IBuildFailed,
                utilstatus.ReasonMessageGenericS2iBuildFailed,
            )
            return err
        }
        config.Injections = util.FixInjectionsWithRelativePath(workdir, config.Injections)
        injectedFiles, err := util.ExpandInjectedFiles(builder.fs, config.Injections)
        if err != nil {
            builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
                utilstatus.ReasonInstallScriptsFailed,
                utilstatus.ReasonMessageInstallScriptsFailed,
            )
            return err
        }
        rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections")
        if err != nil {
            builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
                utilstatus.ReasonGenericS2IBuildFailed,
                utilstatus.ReasonMessageGenericS2iBuildFailed,
            )
            return err
        }
        defer os.Remove(rmScript)
        opts.CommandOverrides = func(cmd string) string {
            return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
                "/tmp/rm-injections", cmd)
        }
        originalOnStart := opts.OnStart
        opts.OnStart = func(containerID string) error {
            defer close(injectionError)
            glog.V(2).Info("starting the injections uploading ...")
            for _, s := range config.Injections {
                if err := builder.docker.UploadToContainer(builder.fs, s.Source, s.Destination, containerID); err != nil {
                    injectionError <- util.HandleInjectionError(s, err)
                    return err
                }
            }
            if err := builder.docker.UploadToContainer(builder.fs, rmScript, "/tmp/rm-injections", containerID); err != nil {
                injectionError <- util.HandleInjectionError(api.VolumeSpec{Source: rmScript, Destination: "/tmp/rm-injections"}, err)
                return err
            }
            if originalOnStart != nil {
                return originalOnStart(containerID)
            }
            return nil
        }
    } else {
        close(injectionError)
    }

    if !config.LayeredBuild {
        r, w := io.Pipe()
        opts.Stdin = r

        go func() {
            // Wait for the injections to complete and check the error. Do
            // not start streaming the sources when the injection failed.
            if <-injectionError != nil {
                w.Close()
                return
            }
            glog.V(2).Info("starting the source uploading ...")
            uploadDir := filepath.Join(config.WorkingDir, "upload")
            w.CloseWithError(builder.tar.CreateTarStream(uploadDir, false, w))
        }()
    }

    dockerpkg.StreamContainerIO(outReader, nil, func(s string) {
        if !config.Quiet {
            glog.Info(strings.TrimSpace(s))
        }
    })

    c := dockerpkg.StreamContainerIO(errReader, &errOutput, func(s string) { glog.Info(s) })

    err := builder.docker.RunContainer(opts)
    if e, ok := err.(s2ierr.ContainerError); ok {
        // Must wait for the StreamContainerIO goroutine above to exit before
        // reading errOutput.
        <-c
        err = s2ierr.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
    }

    return err
}
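// Sketch showing exactly what the opts.CommandOverrides wrapper above
// produces; the cmd value is a hypothetical assemble invocation. Runnable
// as-is to inspect the generated shell line.
package main

import "fmt"

func main() {
    cmd := "/usr/libexec/s2i/assemble" // hypothetical original command
    wrapped := fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
        "/tmp/rm-injections", cmd)
    // The container polls for /tmp/rm-injections, runs assemble, then sources
    // the removal script so injected files don't survive in the image.
    fmt.Println(wrapped)
}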
// RunContainer creates and starts a container using the image specified in
// opts, with the ability to stream input and/or output.
func (d *stiDocker) RunContainer(opts RunContainerOptions) error {
    createOpts := opts.asDockerCreateContainerOptions()

    // get info about the specified image
    image := createOpts.Config.Image
    inspect, err := d.InspectImage(image)
    imageMetadata := &api.Image{}
    if err == nil {
        updateImageWithInspect(imageMetadata, inspect)
        if opts.PullImage {
            _, err = d.CheckAndPullImage(image)
        }
    }
    if err != nil {
        glog.V(0).Infof("error: Unable to get image metadata for %s: %v", image, err)
        return err
    }

    entrypoint, err := d.GetImageEntrypoint(image)
    if err != nil {
        return fmt.Errorf("could not get entrypoint of %q image: %v", image, err)
    }

    // If the image has an entrypoint already defined,
    // it will be overridden either by DefaultEntrypoint,
    // or by the value in opts.Entrypoint.
    // If the image does not have an entrypoint, but
    // opts.Entrypoint is supplied, opts.Entrypoint will
    // be respected.
    if len(entrypoint) != 0 && len(opts.Entrypoint) == 0 {
        opts.Entrypoint = DefaultEntrypoint
    }

    // tarDestination will be passed as the location to the PostExecute
    // function and will be used as the prefix for the CMD (scripts/run)
    var tarDestination string

    var cmd []string
    if !opts.TargetImage {
        if len(opts.CommandExplicit) != 0 {
            cmd = opts.CommandExplicit
        } else {
            tarDestination = determineTarDestinationDir(opts, imageMetadata)
            cmd = constructCommand(opts, imageMetadata, tarDestination)
        }
        glog.V(5).Infof("Setting %q command for container ...", strings.Join(cmd, " "))
    }
    createOpts.Config.Cmd = cmd

    // Create a new container.
    glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...",
        createOpts.Name, createOpts.Config, createOpts.HostConfig)
    ctx, cancel := getDefaultContext()
    defer cancel()
    if createOpts.HostConfig != nil && createOpts.HostConfig.ShmSize <= 0 {
        createOpts.HostConfig.ShmSize = DefaultShmSize
    }
    container, err := d.client.ContainerCreate(ctx, createOpts.Config, createOpts.HostConfig, createOpts.NetworkingConfig, createOpts.Name)
    if err != nil {
        return err
    }

    // Container was created, so we defer its removal, and also remove it if
    // we get a SIGINT/SIGTERM/SIGQUIT/SIGHUP.
    removeContainer := func() {
        glog.V(4).Infof("Removing container %q ...", container.ID)
        if removeErr := d.RemoveContainer(container.ID); removeErr != nil {
            glog.V(0).Infof("warning: Failed to remove container %q: %v", container.ID, removeErr)
        } else {
            glog.V(4).Infof("Removed container %q", container.ID)
        }
    }
    dumpStack := func(signal os.Signal) {
        if signal == syscall.SIGQUIT {
            buf := make([]byte, 1<<16)
            runtime.Stack(buf, true)
            fmt.Printf("%s", buf)
        }
        os.Exit(2)
    }
    return interrupt.New(dumpStack, removeContainer).Run(func() error {
        // Attach to the container on a goroutine, to mimic the blocking
        // behavior we had with go-dockerclient (the k8s wrapper blocks);
        // engine-api does not provide a non-blocking attach. We still
        // preserve the flow of attaching before starting, to handle various
        // timing issues encountered in the past, as well as to allow for the
        // --run option.
        glog.V(2).Infof("Attaching to container %q ...", container.ID)
        errorChannel := make(chan error)
        timeoutTimer := time.NewTimer(DefaultDockerTimeout)
        var attachLoggingError error
        // Unit tests found a DATA RACE on attachLoggingError; at first a
        // simple mutex seemed sufficient, but a race condition in
        // holdHijackedConnection manifested where <-receiveStdout would
        // block even after the container had exited, blocking the return
        // with attachLoggingError. Rather than trying to discern whether the
        // container exited in holdHijackedConnection, we use channel-based
        // signaling coupled with a timer to avoid blocking forever.
        attachExit := make(chan bool, 1)
        go func() {
            ctx, cancel := getDefaultContext()
            defer cancel()
            resp, attachErr := d.client.ContainerAttach(ctx, container.ID, opts.asDockerAttachToContainerOptions())
            errorChannel <- attachErr
            if attachErr != nil {
                glog.V(0).Infof("error: Unable to attach to container %q: %v", container.ID, attachErr)
                return
            }
            defer resp.Close()
            attachLoggingError = d.holdHijackedConnection(false, opts.Stdin, opts.Stdout, opts.Stderr, resp)
            attachExit <- true
        }()

        // This error check handles the result from the
        // d.client.ContainerAttach call; we progress to start once it occurs.
        select {
        case err = <-errorChannel:
            // In non-error scenarios, temporary tracing confirmed that
            // unless the container starts, then exits, the attach blocks and
            // never returns, neither a nil for success nor whatever error it
            // might return on failure.
            if err != nil {
                return err
            }
        case <-timeoutTimer.C:
            return fmt.Errorf("timed out waiting to attach to container %s", container.ID)
        }

        // Start the container
        glog.V(2).Infof("Starting container %q ...", container.ID)
        ctx, cancel := getDefaultContext()
        defer cancel()
        err = d.client.ContainerStart(ctx, container.ID, dockertypes.ContainerStartOptions{})
        if err != nil {
            return err
        }

        // Run the OnStart hook if defined. OnStart might block, so we run it
        // in a new goroutine and wait for it to be done later on.
        onStartDone := make(chan error, 1)
        if opts.OnStart != nil {
            go func() {
                onStartDone <- opts.OnStart(container.ID)
            }()
        }

        if opts.TargetImage {
            // When TargetImage is true, we're dealing with an invocation of
            // `s2i build ... --run`, so this will, e.g., run a web server
            // and block until the user interrupts it (or the container exits
            // normally). Dump port/etc. information for the user.
            dumpContainerInfo(container, d, image)
        }

        // Return an error if the exit code of the container is non-zero.
        glog.V(4).Infof("Waiting for container %q to stop ...", container.ID)
        exitCode, err := d.client.ContainerWait(context.Background(), container.ID)
        if err != nil {
            return fmt.Errorf("waiting for container %q to stop: %v", container.ID, err)
        }
        if exitCode != 0 {
            return errors.NewContainerError(container.ID, exitCode, "")
        }

        // FIXME: If Stdout or Stderr can be closed, close it to notify that
        // there won't be any more writes. This is a hack to close the write
        // half of a pipe so that the read half sees io.EOF.
        // In particular, this is needed to eventually terminate code that
        // runs on OnStart and blocks reading from the pipe.
        if c, ok := opts.Stdout.(io.Closer); ok {
            c.Close()
        }
        if c, ok := opts.Stderr.(io.Closer); ok {
            c.Close()
        }

        // OnStart must be done before we move on.
        if opts.OnStart != nil {
            if err = <-onStartDone; err != nil {
                return err
            }
        }
        // Run the PostExec hook if defined.
        if opts.PostExec != nil {
            glog.V(2).Infof("Invoking PostExecute function")
            if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
                return err
            }
        }

        select {
        case <-attachExit:
            return attachLoggingError
        case <-time.After(DefaultDockerTimeout):
            return nil
        }
    })
}
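// Self-contained sketch of the channel-plus-timer signaling used above: the
// caller waits for the attach goroutine to finish, but bounds the wait so a
// hung hijacked connection cannot block the build result forever. The
// durations are illustrative stand-ins for DefaultDockerTimeout.
package main

import (
    "fmt"
    "time"
)

func main() {
    attachExit := make(chan bool, 1)
    go func() {
        time.Sleep(10 * time.Millisecond) // stand-in for holdHijackedConnection
        attachExit <- true
    }()

    select {
    case <-attachExit:
        fmt.Println("attach goroutine finished; safe to read its error")
    case <-time.After(time.Second):
        fmt.Println("gave up waiting for the attach goroutine")
    }
}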
func (step *startRuntimeImageAndUploadFilesStep) execute(ctx *postExecutorStepContext) error {
    glog.V(3).Info("Executing step: start runtime image and upload files")

    fd, err := ioutil.TempFile("", "s2i-upload-done")
    if err != nil {
        return err
    }
    fd.Close()
    lastFilePath := fd.Name()
    defer func() {
        os.Remove(lastFilePath)
    }()

    lastFileDstPath := "/tmp/" + filepath.Base(lastFilePath)

    outReader, outWriter := io.Pipe()
    errReader, errWriter := io.Pipe()

    artifactsDir := filepath.Join(step.builder.config.WorkingDir, api.RuntimeArtifactsDir)

    // We copy the scripts into the directory with the artifacts to upload
    // everything in one shot
    for _, script := range []string{api.AssembleRuntime, api.Run} {
        // scripts must be inside the "scripts" subdir, see createCommandForExecutingRunScript()
        destinationDir := filepath.Join(artifactsDir, "scripts")
        err = step.copyScriptIfNeeded(script, destinationDir)
        if err != nil {
            return err
        }
    }

    image := step.builder.config.RuntimeImage
    workDir, err := step.docker.GetImageWorkdir(image)
    if err != nil {
        return fmt.Errorf("could not get working dir of %q image: %v", image, err)
    }

    commandBaseDir := filepath.Join(workDir, "scripts")
    useExternalAssembleScript := step.builder.externalScripts[api.AssembleRuntime]
    if !useExternalAssembleScript {
        // script already inside the image
        var scriptsURL string
        scriptsURL, err = step.docker.GetScriptsURL(image)
        if err != nil {
            return err
        }
        if len(scriptsURL) == 0 {
            return fmt.Errorf("could not determine scripts URL for image %q", image)
        }
        commandBaseDir = strings.TrimPrefix(scriptsURL, "image://")
    }

    cmd := fmt.Sprintf(
        "while [ ! -f %q ]; do sleep 0.5; done; %s/%s; exit $?",
        lastFileDstPath,
        commandBaseDir,
        api.AssembleRuntime,
    )

    opts := dockerpkg.RunContainerOptions{
        Image:           image,
        PullImage:       false, // PullImage is false because we've already pulled the image
        CommandExplicit: []string{"/bin/sh", "-c", cmd},
        Stdout:          outWriter,
        Stderr:          errWriter,
        NetworkMode:     string(step.builder.config.DockerNetworkMode),
        CGroupLimits:    step.builder.config.CGroupLimits,
        CapDrop:         step.builder.config.DropCapabilities,
        PostExec:        step.builder.postExecutor,
        Env:             step.builder.env,
    }

    opts.OnStart = func(containerID string) error {
        setStandardPerms := func(writer io.Writer) s2itar.Writer {
            return s2itar.ChmodAdapter{Writer: tar.NewWriter(writer), NewFileMode: 0644, NewExecFileMode: 0755, NewDirMode: 0755}
        }

        glog.V(5).Infof("Uploading directory %q -> %q", artifactsDir, workDir)
        onStartErr := step.docker.UploadToContainerWithTarWriter(step.fs, artifactsDir, workDir, containerID, setStandardPerms)
        if onStartErr != nil {
            return fmt.Errorf("could not upload directory (%q -> %q) into container %s: %v", artifactsDir, workDir, containerID, onStartErr)
        }

        glog.V(5).Infof("Uploading file %q -> %q", lastFilePath, lastFileDstPath)
        onStartErr = step.docker.UploadToContainerWithTarWriter(step.fs, lastFilePath, lastFileDstPath, containerID, setStandardPerms)
        if onStartErr != nil {
            return fmt.Errorf("could not upload file (%q -> %q) into container %s: %v", lastFilePath, lastFileDstPath, containerID, onStartErr)
        }

        return onStartErr
    }

    dockerpkg.StreamContainerIO(outReader, nil, func(s string) { glog.V(0).Info(s) })

    errOutput := ""
    c := dockerpkg.StreamContainerIO(errReader, &errOutput, func(s string) { glog.Info(s) })

    // switch to the next stage of post executor steps
    step.builder.postExecutorStage++

    err = step.docker.RunContainer(opts)
    if e, ok := err.(s2ierr.ContainerError); ok {
        // Must wait for the StreamContainerIO goroutine above to exit before
        // reading errOutput.
        <-c
        err = s2ierr.NewContainerError(image, e.ErrorCode, errOutput)
    }

    return err
}
func (step *startRuntimeImageAndUploadFilesStep) execute(ctx *postExecutorStepContext) error {
    glog.V(3).Info("Executing step: start runtime image and upload files")

    fd, err := ioutil.TempFile("", "s2i-upload-done")
    if err != nil {
        return err
    }
    fd.Close()
    lastFilePath := fd.Name()
    defer func() {
        os.Remove(lastFilePath)
    }()

    lastFileDstPath := "/tmp/" + filepath.Base(lastFilePath)

    outReader, outWriter := io.Pipe()
    defer outReader.Close()
    defer outWriter.Close()

    errReader, errWriter := io.Pipe()
    defer errReader.Close()
    defer errWriter.Close()

    artifactsDir := filepath.Join(step.builder.config.WorkingDir, api.RuntimeArtifactsDir)

    // We copy the scripts into the directory with the artifacts to upload
    // everything in one shot
    for _, script := range []string{api.AssembleRuntime, api.Run} {
        // scripts must be inside the "scripts" subdir, see createCommandForExecutingRunScript()
        destinationDir := filepath.Join(artifactsDir, "scripts")
        if err := step.copyScriptIfNeeded(script, destinationDir); err != nil {
            return err
        }
    }

    image := step.builder.config.RuntimeImage
    workDir, err := step.docker.GetImageWorkdir(image)
    if err != nil {
        return fmt.Errorf("couldn't get working dir of %q image: %v", image, err)
    }

    commandBaseDir := filepath.Join(workDir, "scripts")
    useExternalAssembleScript := step.builder.externalScripts[api.AssembleRuntime]
    if !useExternalAssembleScript {
        // script already inside the image
        scriptsURL, err := step.docker.GetScriptsURL(image)
        if err != nil {
            return err
        }
        if len(scriptsURL) == 0 {
            return fmt.Errorf("couldn't determine scripts URL for image %q", image)
        }
        commandBaseDir = strings.TrimPrefix(scriptsURL, "image://")
    }

    cmd := fmt.Sprintf(
        "while [ ! -f %q ]; do sleep 0.5; done; %s/%s; exit $?",
        lastFileDstPath,
        commandBaseDir,
        api.AssembleRuntime,
    )

    opts := dockerpkg.RunContainerOptions{
        Image:           image,
        Entrypoint:      DefaultEntrypoint,
        PullImage:       false, // PullImage is false because we've already pulled the image
        CommandExplicit: []string{"/bin/sh", "-c", cmd},
        Stdout:          outWriter,
        Stderr:          errWriter,
        NetworkMode:     string(step.builder.config.DockerNetworkMode),
        CGroupLimits:    step.builder.config.CGroupLimits,
        CapDrop:         step.builder.config.DropCapabilities,
        PostExec:        step.builder.postExecutor,
        Env:             step.builder.env,
    }

    opts.OnStart = func(containerID string) error {
        setStandardPerms := func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            // chmod does nothing on windows anyway.
            if runtime.GOOS == "windows" {
                return nil
            }
            // Skip chmod for symlinks
            if info.Mode()&os.ModeSymlink != 0 {
                return nil
            }
            // The file should be writable by the owner (u=w) and readable by
            // other users (a=r); the executable bit is left as is.
            mode := os.FileMode(0644)
            // syscall.S_IEXEC == 0x40, but we can't reference the constant
            // if we want to build releases for windows.
            if info.IsDir() || info.Mode()&0x40 != 0 {
                mode = 0755
            }
            return step.fs.Chmod(path, mode)
        }

        glog.V(5).Infof("Uploading directory %q -> %q", artifactsDir, workDir)
        if err := step.docker.UploadToContainerWithCallback(artifactsDir, workDir, containerID, setStandardPerms, true); err != nil {
            return fmt.Errorf("couldn't upload directory (%q -> %q) into container %s: %v", artifactsDir, workDir, containerID, err)
        }

        glog.V(5).Infof("Uploading file %q -> %q", lastFilePath, lastFileDstPath)
        if err := step.docker.UploadToContainerWithCallback(lastFilePath, lastFileDstPath, containerID, setStandardPerms, true); err != nil {
            return fmt.Errorf("couldn't upload file (%q -> %q) into container %s: %v", lastFilePath, lastFileDstPath, containerID, err)
        }

        return nil
    }

    go dockerpkg.StreamContainerIO(outReader, nil, func(a ...interface{}) { glog.V(0).Info(a...) })

    errOutput := ""
    go dockerpkg.StreamContainerIO(errReader, &errOutput, func(a ...interface{}) { glog.Info(a...) })

    // switch to the next stage of post executor steps
    step.builder.postExecutorStage++

    err = step.docker.RunContainer(opts)
    if e, ok := err.(errors.ContainerError); ok {
        return errors.NewContainerError(image, e.ErrorCode, errOutput)
    }
    // Propagate non-ContainerError failures instead of swallowing them.
    return err
}
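// Self-contained sketch of the permission normalization performed by
// setStandardPerms above: directories and anything with the owner execute
// bit (0x40, i.e. syscall.S_IEXEC) become 0755, everything else 0644.
package main

import (
    "fmt"
    "os"
)

func standardMode(info os.FileInfo) os.FileMode {
    mode := os.FileMode(0644)
    if info.IsDir() || info.Mode()&0x40 != 0 {
        mode = 0755
    }
    return mode
}

func main() {
    info, err := os.Stat(".")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s would be uploaded as %#o\n", info.Name(), standardMode(info))
}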
// RunContainer creates and starts a container using the image specified in
// opts, with the ability to stream input and/or output.
func (d *stiDocker) RunContainer(opts RunContainerOptions) error {
    createOpts := opts.asDockerCreateContainerOptions()

    // get info about the specified image
    image := createOpts.Config.Image
    inspect, err := d.InspectImage(image)
    imageMetadata := &api.Image{}
    if err == nil {
        updateImageWithInspect(imageMetadata, inspect)
        if opts.PullImage {
            _, err = d.CheckAndPullImage(image)
        }
    }
    if err != nil {
        glog.V(0).Infof("error: Unable to get image metadata for %s: %v", image, err)
        return err
    }

    entrypoint, err := d.GetImageEntrypoint(image)
    if err != nil {
        return fmt.Errorf("could not get entrypoint of %q image: %v", image, err)
    }

    // If the image has an entrypoint already defined,
    // it will be overridden either by DefaultEntrypoint,
    // or by the value in opts.Entrypoint.
    // If the image does not have an entrypoint, but
    // opts.Entrypoint is supplied, opts.Entrypoint will
    // be respected.
    if len(entrypoint) != 0 && len(opts.Entrypoint) == 0 {
        opts.Entrypoint = DefaultEntrypoint
    }

    // tarDestination will be passed as the location to the PostExecute
    // function and will be used as the prefix for the CMD (scripts/run)
    var tarDestination string

    var cmd []string
    if !opts.TargetImage {
        if len(opts.CommandExplicit) != 0 {
            cmd = opts.CommandExplicit
        } else {
            tarDestination = determineTarDestinationDir(opts, imageMetadata)
            cmd = constructCommand(opts, imageMetadata, tarDestination)
        }
        glog.V(5).Infof("Setting %q command for container ...", strings.Join(cmd, " "))
    }
    createOpts.Config.Cmd = cmd

    if createOpts.HostConfig != nil && createOpts.HostConfig.ShmSize <= 0 {
        createOpts.HostConfig.ShmSize = DefaultShmSize
    }

    // Create a new container.
    glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...",
        createOpts.Name, createOpts.Config, createOpts.HostConfig)
    ctx, cancel := getDefaultContext()
    defer cancel()
    container, err := d.client.ContainerCreate(ctx, createOpts.Config, createOpts.HostConfig, createOpts.NetworkingConfig, createOpts.Name)
    if err != nil {
        return err
    }

    // Container was created, so we defer its removal, and also remove it if
    // we get a SIGINT/SIGTERM/SIGQUIT/SIGHUP.
    removeContainer := func() {
        glog.V(4).Infof("Removing container %q ...", container.ID)
        if removeErr := d.RemoveContainer(container.ID); removeErr != nil {
            glog.V(0).Infof("warning: Failed to remove container %q: %v", container.ID, removeErr)
        } else {
            glog.V(4).Infof("Removed container %q", container.ID)
        }
    }
    dumpStack := func(signal os.Signal) {
        if signal == syscall.SIGQUIT {
            buf := make([]byte, 1<<16)
            runtime.Stack(buf, true)
            fmt.Printf("%s", buf)
        }
        os.Exit(2)
    }
    return interrupt.New(dumpStack, removeContainer).Run(func() error {
        glog.V(2).Infof("Attaching to container %q ...", container.ID)
        ctx, cancel := getDefaultContext()
        defer cancel()
        resp, err := d.client.ContainerAttach(ctx, container.ID, opts.asDockerAttachToContainerOptions())
        if err != nil {
            glog.V(0).Infof("error: Unable to attach to container %q: %v", container.ID, err)
            return err
        }
        defer resp.Close()

        // Start the container
        glog.V(2).Infof("Starting container %q ...", container.ID)
        ctx, cancel = getDefaultContext()
        defer cancel()
        err = d.client.ContainerStart(ctx, container.ID)
        if err != nil {
            return err
        }

        // Run the OnStart hook if defined. OnStart might block, so we run it
        // in a new goroutine and wait for it to be done later on.
        onStartDone := make(chan error, 1)
        if opts.OnStart != nil {
            go func() {
                onStartDone <- opts.OnStart(container.ID)
            }()
        }

        if opts.TargetImage {
            // When TargetImage is true, we're dealing with an invocation of
            // `s2i build ... --run`, so this will, e.g., run a web server
            // and block until the user interrupts it (or the container exits
            // normally). Dump port/etc. information for the user.
            dumpContainerInfo(container, d, image)
        }

        err = d.holdHijackedConnection(false, opts.Stdin, opts.Stdout, opts.Stderr, resp)
        if err != nil {
            return err
        }

        // Return an error if the exit code of the container is non-zero.
        glog.V(4).Infof("Waiting for container %q to stop ...", container.ID)
        exitCode, err := d.client.ContainerWait(context.Background(), container.ID)
        if err != nil {
            return fmt.Errorf("waiting for container %q to stop: %v", container.ID, err)
        }
        if exitCode != 0 {
            return s2ierr.NewContainerError(container.ID, exitCode, "")
        }

        // OnStart must be done before we move on.
        if opts.OnStart != nil {
            if err = <-onStartDone; err != nil {
                return err
            }
        }
        // Run the PostExec hook if defined.
        if opts.PostExec != nil {
            glog.V(2).Infof("Invoking PostExecute function")
            if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
                return err
            }
        }
        return nil
    })
}
// RunContainer creates and starts a container using the image specified in
// opts, with the ability to stream input and/or output.
func (d *stiDocker) RunContainer(opts RunContainerOptions) error {
    createOpts := opts.asDockerCreateContainerOptions()

    // get info about the specified image
    image := createOpts.Config.Image
    var (
        imageMetadata *docker.Image
        err           error
    )
    if opts.PullImage {
        imageMetadata, err = d.CheckAndPullImage(image)
    } else {
        imageMetadata, err = d.client.InspectImage(image)
    }
    if err != nil {
        glog.Errorf("Unable to get image metadata for %s: %v", image, err)
        return err
    }

    cmd, tarDestination := runContainerTar(opts, imageMetadata)
    createOpts.Config.Cmd = cmd

    // Create a new container.
    glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...",
        createOpts.Name, createOpts.Config, createOpts.HostConfig)
    var container *docker.Container
    if err := util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to create container", func() error {
        var createErr error
        container, createErr = d.client.CreateContainer(createOpts)
        return createErr
    }); err != nil {
        return err
    }

    containerName := containerNameOrID(container)

    // Container was created, so we defer its removal.
    defer func() {
        glog.V(4).Infof("Removing container %q ...", containerName)
        if err := d.RemoveContainer(container.ID); err != nil {
            glog.Warningf("Failed to remove container %q: %v", containerName, err)
        } else {
            glog.V(4).Infof("Removed container %q", containerName)
        }
    }()

    // Attach to the container.
    glog.V(2).Infof("Attaching to container %q ...", containerName)
    attachOpts := opts.asDockerAttachToContainerOptions()
    attachOpts.Container = container.ID
    if _, err = d.client.AttachToContainerNonBlocking(attachOpts); err != nil {
        glog.Errorf("Unable to attach to container %q with options %+v: %v", containerName, attachOpts, err)
        return err
    }

    // Start the container.
    glog.V(2).Infof("Starting container %q ...", containerName)
    if err := util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to start container", func() error {
        return d.client.StartContainer(container.ID, nil)
    }); err != nil {
        return err
    }

    // Run the OnStart hook if defined. OnStart might block, so we run it in
    // a new goroutine and wait for it to be done later on.
    onStartDone := make(chan error, 1)
    if opts.OnStart != nil {
        go func() {
            onStartDone <- opts.OnStart(container.ID)
        }()
    }

    // We either block waiting for a user-originated SIGINT, or wait for the
    // container to terminate. When TargetImage is true, we're dealing with
    // an invocation of `s2i build ... --run`, so this will, e.g., run a web
    // server and block until the user interrupts it. The other case is seen
    // when running the assemble script or other commands that are meant to
    // terminate in a finite amount of time.
    if opts.TargetImage {
        runContainerDockerRun(container, d, image)
    } else {
        // Return an error if the exit code of the container is non-zero.
        glog.V(4).Infof("Waiting for container %q to stop ...", containerName)
        exitCode, err := d.client.WaitContainer(container.ID)
        if err != nil {
            return fmt.Errorf("waiting for container %q to stop: %v", containerName, err)
        }
        if exitCode != 0 {
            return errors.NewContainerError(container.Name, exitCode, "")
        }
    }

    // FIXME: If Stdout or Stderr can be closed, close it to notify that
    // there won't be any more writes. This is a hack to close the write
    // half of a pipe so that the read half sees io.EOF.
    // In particular, this is needed to eventually terminate code that runs
    // on OnStart and blocks reading from the pipe.
    if c, ok := opts.Stdout.(io.Closer); ok {
        c.Close()
    }
    if c, ok := opts.Stderr.(io.Closer); ok {
        c.Close()
    }

    // OnStart must be done before we move on.
    if opts.OnStart != nil {
        if err := <-onStartDone; err != nil {
            return err
        }
    }

    // Run the PostExec hook if defined.
    if opts.PostExec != nil {
        glog.V(2).Infof("Invoking postExecution function")
        if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
            return err
        }
    }
    return nil
}
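// Self-contained sketch of the pipe-closing hack in the FIXME above: closing
// the write half of an io.Pipe makes a reader blocked on it observe io.EOF
// and return, which is how OnStart hooks reading container output get
// unblocked.
package main

import (
    "fmt"
    "io"
    "io/ioutil"
)

func main() {
    r, w := io.Pipe()
    done := make(chan struct{})
    go func() { // stand-in for an OnStart hook reading container output
        b, _ := ioutil.ReadAll(r) // blocks until EOF
        fmt.Printf("reader saw %d bytes, then io.EOF\n", len(b))
        close(done)
    }()

    w.Write([]byte("container output"))
    var out io.Writer = w
    if c, ok := out.(io.Closer); ok { // the same type assertion used above
        c.Close()
    }
    <-done
}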
// Execute runs the specified STI script in the builder image.
func (b *STI) Execute(command string, config *api.Config) error {
    glog.V(2).Infof("Using image name %s", config.BuilderImage)

    env, err := scripts.GetEnvironment(config)
    if err != nil {
        glog.V(1).Infof("No .sti/environment provided (%v)", err)
    }

    buildEnv := append(scripts.ConvertEnvironment(env), b.generateConfigEnv()...)

    uploadDir := filepath.Join(config.WorkingDir, "upload")
    tarFileName, err := b.tar.CreateTarFile(config.WorkingDir, uploadDir)
    if err != nil {
        return err
    }

    tarFile, err := b.fs.Open(tarFileName)
    if err != nil {
        return err
    }
    defer tarFile.Close()

    errOutput := ""
    outReader, outWriter := io.Pipe()
    errReader, errWriter := io.Pipe()
    defer outReader.Close()
    defer outWriter.Close()
    defer errReader.Close()
    defer errWriter.Close()
    externalScripts := b.externalScripts[command]
    // if LayeredBuild is called, all the scripts will be placed inside the image
    if config.LayeredBuild {
        externalScripts = false
    }
    opts := docker.RunContainerOptions{
        Image:           config.BuilderImage,
        Stdout:          outWriter,
        Stderr:          errWriter,
        PullImage:       config.ForcePull,
        ExternalScripts: externalScripts,
        ScriptsURL:      config.ScriptsURL,
        Destination:     config.Destination,
        Command:         command,
        Env:             buildEnv,
        PostExec:        b.postExecutor,
    }
    if !config.LayeredBuild {
        opts.Stdin = tarFile
    }

    go func(reader io.Reader) {
        scanner := bufio.NewReader(reader)
        for {
            text, err := scanner.ReadString('\n')
            if err != nil {
                // we're ignoring ErrClosedPipe, as it only signals that the
                // docker container ended streaming logs
                if glog.V(2) && err != io.ErrClosedPipe {
                    glog.Errorf("Error reading docker stdout, %v", err)
                }
                break
            }
            if glog.V(2) || !config.Quiet || command == api.Usage {
                glog.Info(text)
            }
        }
    }(outReader)
    go streamContainerError(errReader, &errOutput, config)

    err = b.docker.RunContainer(opts)
    if e, ok := err.(errors.ContainerError); ok {
        return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
    }
    return err
}
// RunContainer creates and starts a container using the image specified in
// opts, with the ability to stream input and/or output.
func (d *stiDocker) RunContainer(opts RunContainerOptions) error {
    createOpts := opts.asDockerCreateContainerOptions()

    // get info about the specified image
    image := createOpts.Config.Image
    var (
        imageMetadata *docker.Image
        err           error
    )
    if opts.PullImage {
        imageMetadata, err = d.CheckAndPullImage(image)
    } else {
        imageMetadata, err = d.client.InspectImage(image)
    }

    // If the original image has no entrypoint, and the run invocation is
    // trying to set an entrypoint, ignore it. We only want to set the
    // entrypoint if we need to override a default entrypoint in the image.
    // This allows us to still work with a minimal image that does not
    // contain "/usr/bin/env", since we don't attempt to override the
    // entrypoint.
    if len(opts.Entrypoint) != 0 {
        entrypoint, err := d.GetImageEntrypoint(image)
        if err != nil {
            return err
        }
        if len(entrypoint) == 0 {
            opts.Entrypoint = nil
        }
    }

    if err != nil {
        glog.V(0).Infof("error: Unable to get image metadata for %s: %v", image, err)
        return err
    }

    // tarDestination will be passed as the location to the PostExecute
    // function and will be used as the prefix for the CMD (scripts/run)
    var tarDestination string

    var cmd []string
    if !opts.TargetImage {
        if len(opts.CommandExplicit) != 0 {
            cmd = opts.CommandExplicit
        } else {
            tarDestination = determineTarDestinationDir(opts, imageMetadata)
            cmd = constructCommand(opts, imageMetadata, tarDestination)
        }
        glog.V(5).Infof("Setting %q command for container ...", strings.Join(cmd, " "))
    }
    createOpts.Config.Cmd = cmd

    // Create a new container.
    glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...",
        createOpts.Name, createOpts.Config, createOpts.HostConfig)
    var container *docker.Container
    if err = util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to create container", func() error {
        var createErr error
        container, createErr = d.client.CreateContainer(createOpts)
        return createErr
    }); err != nil {
        return err
    }

    containerName := containerNameOrID(container)

    // Container was created, so we defer its removal, and also remove it if
    // we get a SIGINT/SIGTERM/SIGQUIT/SIGHUP.
    removeContainer := func() {
        glog.V(4).Infof("Removing container %q ...", containerName)
        if err := d.RemoveContainer(container.ID); err != nil {
            glog.V(0).Infof("warning: Failed to remove container %q: %v", containerName, err)
        } else {
            glog.V(4).Infof("Removed container %q", containerName)
        }
    }
    dumpStack := func(signal os.Signal) {
        if signal == syscall.SIGQUIT {
            buf := make([]byte, 1<<16)
            runtime.Stack(buf, true)
            fmt.Printf("%s", buf)
        }
        os.Exit(2)
    }
    return interrupt.New(dumpStack, removeContainer).Run(func() error {
        // Attach to the container.
        glog.V(2).Infof("Attaching to container %q ...", containerName)
        attachOpts := opts.asDockerAttachToContainerOptions()
        attachOpts.Container = container.ID
        if _, err = d.client.AttachToContainerNonBlocking(attachOpts); err != nil {
            glog.V(0).Infof("error: Unable to attach to container %q with options %+v: %v", containerName, attachOpts, err)
            return err
        }

        // Start the container.
        glog.V(2).Infof("Starting container %q ...", containerName)
        if err := util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to start container", func() error {
            return d.client.StartContainer(container.ID, nil)
        }); err != nil {
            return err
        }

        // Run the OnStart hook if defined. OnStart might block, so we run it
        // in a new goroutine and wait for it to be done later on.
        onStartDone := make(chan error, 1)
        if opts.OnStart != nil {
            go func() {
                onStartDone <- opts.OnStart(container.ID)
            }()
        }

        if opts.TargetImage {
            // When TargetImage is true, we're dealing with an invocation of
            // `s2i build ... --run`, so this will, e.g., run a web server
            // and block until the user interrupts it (or the container exits
            // normally). Dump port/etc. information for the user.
            dumpContainerInfo(container, d, image)
        }

        // Return an error if the exit code of the container is non-zero.
        glog.V(4).Infof("Waiting for container %q to stop ...", containerName)
        exitCode, err := d.client.WaitContainer(container.ID)
        if err != nil {
            return fmt.Errorf("waiting for container %q to stop: %v", containerName, err)
        }
        if exitCode != 0 {
            return errors.NewContainerError(container.Name, exitCode, "")
        }

        // FIXME: If Stdout or Stderr can be closed, close it to notify that
        // there won't be any more writes. This is a hack to close the write
        // half of a pipe so that the read half sees io.EOF.
        // In particular, this is needed to eventually terminate code that
        // runs on OnStart and blocks reading from the pipe.
        if c, ok := opts.Stdout.(io.Closer); ok {
            c.Close()
        }
        if c, ok := opts.Stderr.(io.Closer); ok {
            c.Close()
        }

        // OnStart must be done before we move on.
        if opts.OnStart != nil {
            if err = <-onStartDone; err != nil {
                return err
            }
        }

        // Run the PostExec hook if defined.
        if opts.PostExec != nil {
            glog.V(2).Infof("Invoking PostExecute function")
            if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
                return err
            }
        }
        return nil
    })
}
// Execute runs the specified STI script in the builder image.
func (b *STI) Execute(command string, config *api.Config) error {
    glog.V(2).Infof("Using image name %s", config.BuilderImage)

    env, err := scripts.GetEnvironment(config)
    if err != nil {
        glog.V(1).Infof("No .sti/environment provided (%v)", err)
    }

    buildEnv := append(scripts.ConvertEnvironment(env), b.generateConfigEnv()...)

    errOutput := ""
    outReader, outWriter := io.Pipe()
    errReader, errWriter := io.Pipe()
    defer outReader.Close()
    defer outWriter.Close()
    defer errReader.Close()
    defer errWriter.Close()
    externalScripts := b.externalScripts[command]
    // if LayeredBuild is called, all the scripts will be placed inside the image
    if config.LayeredBuild {
        externalScripts = false
    }
    opts := dockerpkg.RunContainerOptions{
        Image:           config.BuilderImage,
        Stdout:          outWriter,
        Stderr:          errWriter,
        PullImage:       config.ForcePull,
        ExternalScripts: externalScripts,
        ScriptsURL:      config.ScriptsURL,
        Destination:     config.Destination,
        Command:         command,
        Env:             buildEnv,
        PostExec:        b.postExecutor,
    }

    if !config.LayeredBuild {
        wg := sync.WaitGroup{}
        wg.Add(1)
        uploadDir := filepath.Join(config.WorkingDir, "upload")
        // TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
        r, w := io.Pipe()
        go func() {
            var err error
            defer func() {
                w.CloseWithError(err)
                if r := recover(); r != nil {
                    glog.Errorf("recovered panic: %#v", r)
                }
                wg.Done()
            }()
            err = b.tar.CreateTarStream(uploadDir, false, w)
        }()
        opts.Stdin = r
        defer wg.Wait()
    }

    go func(reader io.Reader) {
        scanner := bufio.NewReader(reader)
        for {
            text, err := scanner.ReadString('\n')
            if err != nil {
                // we're ignoring ErrClosedPipe, as it only signals that the
                // docker container ended streaming logs
                if glog.V(2) && err != io.ErrClosedPipe {
                    glog.Errorf("Error reading docker stdout, %v", err)
                }
                break
            }
            if glog.V(2) || !config.Quiet || command == api.Usage {
                glog.Info(text)
            }
        }
    }(outReader)
    go dockerpkg.StreamContainerIO(errReader, &errOutput, glog.Error)

    err = b.docker.RunContainer(opts)
    if e, ok := err.(errors.ContainerError); ok {
        return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
    }
    return err
}
// RunContainer creates and starts a container using the image specified in
// opts, with the ability to stream input and/or output.
func (d *stiDocker) RunContainer(opts RunContainerOptions) error {
    createOpts := opts.asDockerCreateContainerOptions()

    // get info about the specified image
    image := createOpts.Config.Image
    var (
        imageMetadata *docker.Image
        err           error
    )
    if opts.PullImage {
        imageMetadata, err = d.CheckAndPullImage(image)
    } else {
        imageMetadata, err = d.client.InspectImage(image)
    }
    if err != nil {
        glog.Errorf("Unable to get image metadata for %s: %v", image, err)
        return err
    }

    cmd, tarDestination := runContainerTar(opts, imageMetadata)
    createOpts.Config.Cmd = cmd

    // Create a new container.
    glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...",
        createOpts.Name, createOpts.Config, createOpts.HostConfig)
    var container *docker.Container
    if err := util.TimeoutAfter(DefaultDockerTimeout, func() error {
        var createErr error
        container, createErr = d.client.CreateContainer(createOpts)
        return createErr
    }); err != nil {
        return err
    }

    containerName := containerNameOrID(container)

    // Container was created, so we defer its removal.
    defer func() {
        glog.V(4).Infof("Removing container %q ...", containerName)
        if err := d.RemoveContainer(container.ID); err != nil {
            glog.Warningf("Failed to remove container %q: %v", containerName, err)
        } else {
            glog.V(4).Infof("Removed container %q", containerName)
        }
    }()

    // Attach to the container.
    glog.V(2).Infof("Attaching to container %q ...", containerName)
    attachOpts := opts.asDockerAttachToContainerOptions()
    attachOpts.Container = container.ID
    if _, err = d.client.AttachToContainerNonBlocking(attachOpts); err != nil {
        glog.Errorf("Unable to attach to container %q with options %+v: %v", containerName, attachOpts, err)
        return err
    }

    // Start the container.
    glog.V(2).Infof("Starting container %q ...", containerName)
    if err := util.TimeoutAfter(DefaultDockerTimeout, func() error {
        return d.client.StartContainer(container.ID, nil)
    }); err != nil {
        return err
    }

    // Run the OnStart hook if defined.
    if opts.OnStart != nil {
        if err = opts.OnStart(container.ID); err != nil {
            return err
        }
    }

    // We either block waiting for a user-originated SIGINT, or wait for the
    // container to terminate. When TargetImage is true, we're dealing with
    // an invocation of `s2i build ... --run`, so this will, e.g., run a web
    // server and block until the user interrupts it. The other case is seen
    // when running the assemble script or other commands that are meant to
    // terminate in a finite amount of time.
    if opts.TargetImage {
        runContainerDockerRun(container, d, image)
    } else {
        // Return an error if the exit code of the container is non-zero.
        glog.V(4).Infof("Waiting for container %q to stop ...", containerName)
        exitCode, err := d.client.WaitContainer(container.ID)
        if err != nil {
            return fmt.Errorf("waiting for container %q to stop: %v", containerName, err)
        }
        if exitCode != 0 {
            return errors.NewContainerError(container.Name, exitCode, "")
        }
    }

    // Run the PostExec hook if defined.
    if opts.PostExec != nil {
        glog.V(2).Infof("Invoking postExecution function")
        if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
            return err
        }
    }
    return nil
}
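// A hypothetical stand-in (not the project's util package) for the
// util.TimeoutAfter helper used above: run fn, but return an error if it
// doesn't finish within the limit. Note the goroutine itself is not
// cancelled; only the wait on it is bounded.
package main

import (
    "errors"
    "fmt"
    "time"
)

func timeoutAfter(limit time.Duration, msg string, fn func() error) error {
    ch := make(chan error, 1)
    go func() { ch <- fn() }()
    select {
    case err := <-ch:
        return err
    case <-time.After(limit):
        return errors.New(msg)
    }
}

func main() {
    err := timeoutAfter(50*time.Millisecond, "timed out waiting for Docker to create container", func() error {
        time.Sleep(10 * time.Millisecond) // stand-in for d.client.CreateContainer
        return nil
    })
    fmt.Println(err) // <nil>
}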