// ExtractTarStreamFromTarReader extracts files from a given tar stream. // Times out if reading from the stream for any given file // exceeds the value of timeout
//
// Entries are written under dir. Directories are created with mode 0700;
// symlinks and regular files are delegated to t.extractLink / t.extractFile.
// Regular file names are echoed to logger via logFile.
//
// NOTE(review): header.Name is joined onto dir without any visible check for
// "../" components here — presumably extractLink/extractFile guard against
// path traversal; verify, since the tar stream may be untrusted.
func (t *stiTar) ExtractTarStreamFromTarReader(dir string, tarReader Reader, logger io.Writer) error {
	err := util.TimeoutAfter(t.timeout, "", func(timeoutTimer *time.Timer) error {
		for {
			header, err := tarReader.Next()
			// Stop() returning false means the timer already fired while we
			// were blocked in Next(): the per-entry read deadline elapsed.
			if !timeoutTimer.Stop() {
				return &util.TimeoutError{}
			}
			// Re-arm the deadline for the next tar entry.
			timeoutTimer.Reset(t.timeout)
			if err == io.EOF {
				// Normal end of archive.
				return nil
			}
			if err != nil {
				glog.Errorf("Error reading next tar header: %v", err)
				return err
			}
			if header.FileInfo().IsDir() {
				// Directory entry: just materialize it.
				dirPath := filepath.Join(dir, header.Name)
				glog.V(3).Infof("Creating directory %s", dirPath)
				if err = os.MkdirAll(dirPath, 0700); err != nil {
					glog.Errorf("Error creating dir %q: %v", dirPath, err)
					return err
				}
			} else {
				// File or symlink entry: ensure its parent directory exists
				// first (archives do not always contain directory entries).
				fileDir := filepath.Dir(header.Name)
				dirPath := filepath.Join(dir, fileDir)
				glog.V(3).Infof("Creating directory %s", dirPath)
				if err = os.MkdirAll(dirPath, 0700); err != nil {
					glog.Errorf("Error creating dir %q: %v", dirPath, err)
					return err
				}
				if header.Typeflag == tar.TypeSymlink {
					if err := t.extractLink(dir, header, tarReader); err != nil {
						glog.Errorf("Error extracting link %q: %v", header.Name, err)
						return err
					}
					continue
				}
				logFile(logger, header.Name)
				if err := t.extractFile(dir, header, tarReader); err != nil {
					glog.Errorf("Error extracting file %q: %v", header.Name, err)
					return err
				}
			}
		}
	})
	if err != nil {
		glog.Error("Error extracting tar stream")
	} else {
		glog.V(2).Info("Done extracting tar stream")
	}
	// Translate the generic timeout into the tar-specific error callers expect.
	if util.IsTimeoutError(err) {
		err = s2ierr.NewTarTimeoutError()
	}
	return err
}
// PullImage pulls an image into the local registry
//
// The pull streams JSON progress messages; the timeout timer is re-armed on
// every decoded message, so the overall pull may exceed DefaultDockerTimeout
// as long as progress keeps arriving. On success the pulled image is
// inspected and returned as an *api.Image.
//
// NOTE(review): returns (nil, nil) when the post-pull inspect yields a nil
// response without error — callers must tolerate a nil image.
func (d *stiDocker) PullImage(name string) (*api.Image, error) {
	name = getImageName(name)

	// RegistryAuth is the base64 encoded credentials for the registry
	base64Auth, err := base64EncodeAuth(d.pullAuth)
	if err != nil {
		return nil, errors.NewPullImageError(name, err)
	}

	err = util.TimeoutAfter(DefaultDockerTimeout, fmt.Sprintf("pulling image %q", name), func(timer *time.Timer) error {
		resp, pullErr := d.client.ImagePull(context.Background(), name, dockertypes.ImagePullOptions{RegistryAuth: base64Auth})
		if pullErr != nil {
			return pullErr
		}
		defer resp.Close()

		decoder := json.NewDecoder(resp)
		for {
			// Stop() returning false means the deadline fired while blocked
			// in Decode — treat the pull as stalled.
			if !timer.Stop() {
				return &util.TimeoutError{}
			}
			// Progress was made; re-arm the deadline.
			timer.Reset(DefaultDockerTimeout)

			var msg dockermessage.JSONMessage
			pullErr = decoder.Decode(&msg)
			if pullErr == io.EOF {
				// Stream ended cleanly: pull complete.
				return nil
			}
			if pullErr != nil {
				return pullErr
			}

			if msg.Error != nil {
				// Daemon-side failure reported in-band.
				return msg.Error
			}
			if msg.ProgressMessage != "" {
				glog.V(4).Infof("pulling image %s: %s", name, msg.ProgressMessage)
			}
		}
	})
	if err != nil {
		return nil, errors.NewPullImageError(name, err)
	}

	// Resolve the freshly pulled image to its metadata.
	inspectResp, err := d.InspectImage(name)
	if err != nil {
		return nil, errors.NewPullImageError(name, err)
	}
	if inspectResp != nil {
		image := &api.Image{}
		updateImageWithInspect(image, inspectResp)
		return image, nil
	}
	return nil, nil
}
// RunContainer creates and starts a container using the image specified in opts // with the ability to stream input and/or output. func (d *stiDocker) RunContainer(opts RunContainerOptions) error { createOpts := opts.asDockerCreateContainerOptions() // get info about the specified image image := createOpts.Config.Image var ( imageMetadata *docker.Image err error ) if opts.PullImage { imageMetadata, err = d.CheckAndPullImage(image) } else { imageMetadata, err = d.client.InspectImage(image) } // if the original image has no entrypoint, and the run invocation // is trying to set an entrypoint, ignore it. We only want to // set the entrypoint if we need to override a default entrypoint // in the image. This allows us to still work with a minimal image // that does not contain "/usr/bin/env" since we don't attempt to override // the entrypoint. if len(opts.Entrypoint) != 0 { entrypoint, err := d.GetImageEntrypoint(image) if err != nil { return err } if len(entrypoint) == 0 { opts.Entrypoint = nil } } if err != nil { glog.V(0).Infof("error: Unable to get image metadata for %s: %v", image, err) return err } // tarDestination will be passed as location to PostExecute function // and will be used as the prefix for the CMD (scripts/run) var tarDestination string var cmd []string if !opts.TargetImage { if len(opts.CommandExplicit) != 0 { cmd = opts.CommandExplicit } else { tarDestination = determineTarDestinationDir(opts, imageMetadata) cmd = constructCommand(opts, imageMetadata, tarDestination) } glog.V(5).Infof("Setting %q command for container ...", strings.Join(cmd, " ")) } createOpts.Config.Cmd = cmd // Create a new container. 
glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...", createOpts.Name, createOpts.Config, createOpts.HostConfig) var container *docker.Container if err = util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to create container", func() error { var createErr error container, createErr = d.client.CreateContainer(createOpts) return createErr }); err != nil { return err } containerName := containerNameOrID(container) // Container was created, so we defer its removal, and also remove it if we get a SIGINT/SIGTERM/SIGQUIT/SIGHUP. removeContainer := func() { glog.V(4).Infof("Removing container %q ...", containerName) if err := d.RemoveContainer(container.ID); err != nil { glog.V(0).Infof("warning: Failed to remove container %q: %v", containerName, err) } else { glog.V(4).Infof("Removed container %q", containerName) } } dumpStack := func(signal os.Signal) { if signal == syscall.SIGQUIT { buf := make([]byte, 1<<16) runtime.Stack(buf, true) fmt.Printf("%s", buf) } os.Exit(2) } return interrupt.New(dumpStack, removeContainer).Run(func() error { // Attach to the container. glog.V(2).Infof("Attaching to container %q ...", containerName) attachOpts := opts.asDockerAttachToContainerOptions() attachOpts.Container = container.ID if _, err = d.client.AttachToContainerNonBlocking(attachOpts); err != nil { glog.V(0).Infof("error: Unable to attach to container %q with options %+v: %v", containerName, attachOpts, err) return err } // Start the container. glog.V(2).Infof("Starting container %q ...", containerName) if err := util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to start container", func() error { return d.client.StartContainer(container.ID, nil) }); err != nil { return err } // Run OnStart hook if defined. OnStart might block, so we run it in a // new goroutine, and wait for it to be done later on. 
onStartDone := make(chan error, 1) if opts.OnStart != nil { go func() { onStartDone <- opts.OnStart(container.ID) }() } if opts.TargetImage { // When TargetImage is true, we're dealing with an invocation of `s2i build ... --run` // so this will, e.g., run a web server and block until the user interrupts it (or // the container exits normally). dump port/etc information for the user. dumpContainerInfo(container, d, image) } // Return an error if the exit code of the container is // non-zero. glog.V(4).Infof("Waiting for container %q to stop ...", containerName) exitCode, err := d.client.WaitContainer(container.ID) if err != nil { return fmt.Errorf("waiting for container %q to stop: %v", containerName, err) } if exitCode != 0 { return errors.NewContainerError(container.Name, exitCode, "") } // FIXME: If Stdout or Stderr can be closed, close it to notify that // there won't be any more writes. This is a hack to close the write // half of a pipe so that the read half sees io.EOF. // In particular, this is needed to eventually terminate code that runs // on OnStart and blocks reading from the pipe. if c, ok := opts.Stdout.(io.Closer); ok { c.Close() } if c, ok := opts.Stderr.(io.Closer); ok { c.Close() } // OnStart must be done before we move on. if opts.OnStart != nil { if err = <-onStartDone; err != nil { return err } } // Run PostExec hook if defined. if opts.PostExec != nil { glog.V(2).Infof("Invoking PostExecute function") if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil { return err } } return nil }) }
// RunContainer creates and starts a container using the image specified in opts // with the ability to stream input and/or output. func (d *stiDocker) RunContainer(opts RunContainerOptions) error { createOpts := opts.asDockerCreateContainerOptions() // get info about the specified image image := createOpts.Config.Image inspect, err := d.InspectImage(image) imageMetadata := &api.Image{} if err == nil { updateImageWithInspect(imageMetadata, inspect) if opts.PullImage { _, err = d.CheckAndPullImage(image) } } if err != nil { glog.V(0).Infof("error: Unable to get image metadata for %s: %v", image, err) return err } entrypoint, err := d.GetImageEntrypoint(image) if err != nil { return fmt.Errorf("Couldn't get entrypoint of %q image: %v", image, err) } // If the image has an entrypoint already defined, // it will be overridden either by DefaultEntrypoint, // or by the value in opts.Entrypoint. // If the image does not have an entrypoint, but // opts.Entrypoint is supplied, opts.Entrypoint will // be respected. if len(entrypoint) != 0 && len(opts.Entrypoint) == 0 { opts.Entrypoint = DefaultEntrypoint } // tarDestination will be passed as location to PostExecute function // and will be used as the prefix for the CMD (scripts/run) var tarDestination string var cmd []string if !opts.TargetImage { if len(opts.CommandExplicit) != 0 { cmd = opts.CommandExplicit } else { tarDestination = determineTarDestinationDir(opts, imageMetadata) cmd = constructCommand(opts, imageMetadata, tarDestination) } glog.V(5).Infof("Setting %q command for container ...", strings.Join(cmd, " ")) } createOpts.Config.Cmd = cmd // Create a new container. 
glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...", createOpts.Name, createOpts.Config, createOpts.HostConfig) var container *dockertypes.ContainerCreateResponse if err = util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to create container", func() error { var createErr error if createOpts.HostConfig != nil && createOpts.HostConfig.ShmSize <= 0 { createOpts.HostConfig.ShmSize = DefaultShmSize } container, createErr = d.kubeDockerClient.CreateContainer(createOpts) return createErr }); err != nil { return err } containerName := containerNameOrID(container) // Container was created, so we defer its removal, and also remove it if we get a SIGINT/SIGTERM/SIGQUIT/SIGHUP. removeContainer := func() { glog.V(4).Infof("Removing container %q ...", containerName) if err := d.RemoveContainer(container.ID); err != nil { glog.V(0).Infof("warning: Failed to remove container %q: %v", containerName, err) } else { glog.V(4).Infof("Removed container %q", containerName) } } dumpStack := func(signal os.Signal) { if signal == syscall.SIGQUIT { buf := make([]byte, 1<<16) runtime.Stack(buf, true) fmt.Printf("%s", buf) } os.Exit(2) } return interrupt.New(dumpStack, removeContainer).Run(func() error { // Attach to the container on go thread (different than with go-dockerclient, since it provided a non-blocking attach which we don't seem to have with k8s/engine-api) // Attach to the container on go thread to mimic blocking behavior we had with go-dockerclient (k8s wrapper blocks); then use borrowed code // from k8s to dump logs via return // still preserve the flow of attaching before starting to handle various timing issues encountered in the past, as well as allow for --run option glog.V(2).Infof("Attaching to container %q ...", containerName) errorChannel := make(chan error) timeoutTimer := time.NewTimer(DefaultDockerTimeout) var attachLoggingError error // unit tests found a DATA RACE on attachLoggingError; at first a 
simple mutex seemed sufficient, but a race condition in holdHijackedConnection manifested // where <-receiveStdout would block even after the container had exitted, blocking the return with attachLoggingError; rather than trying to discern if the // container exited in holdHijackedConnection, we'll using channel based signaling coupled with a time to avoid blocking forever attachExit := make(chan bool, 1) go func() { ctx, cancel := getDefaultContext(DefaultDockerTimeout) defer cancel() resp, err := d.client.ContainerAttach(ctx, container.ID, opts.asDockerAttachToContainerOptions()) errorChannel <- err if err != nil { glog.V(0).Infof("error: Unable to attach to container %q: %v", containerName, err) return } defer resp.Close() sopts := opts.asDockerAttachToStreamOptions() attachLoggingError = d.holdHijackedConnection(sopts.RawTerminal, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp) attachExit <- true }() // this error check should handle the result from the d.client.ContainerAttach call ... we progress to start when that occurs select { case err := <-errorChannel: // in non-error scenarios, temporary tracing confirmed that // unless the container starts, then exits, the attach blocks and // never returns either a nil for success or whatever err it might // return if the attach failed if err != nil { return err } break case <-timeoutTimer.C: return fmt.Errorf("timed out waiting to attach to container %s ", containerName) } // Start the container glog.V(2).Infof("Starting container %q ...", containerName) if err := util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to start container", func() error { return d.kubeDockerClient.StartContainer(container.ID) }); err != nil { return err } // Run OnStart hook if defined. OnStart might block, so we run it in a // new goroutine, and wait for it to be done later on. 
onStartDone := make(chan error, 1) if opts.OnStart != nil { go func() { onStartDone <- opts.OnStart(container.ID) }() } if opts.TargetImage { // When TargetImage is true, we're dealing with an invocation of `s2i build ... --run` // so this will, e.g., run a web server and block until the user interrupts it (or // the container exits normally). dump port/etc information for the user. dumpContainerInfo(container, d, image) } // Return an error if the exit code of the container is // non-zero. glog.V(4).Infof("Waiting for container %q to stop ...", containerName) ctx, cancel := getDefaultContext(math.MaxInt64 * time.Nanosecond) // infinite duration ... go does not expose max duration constant defer cancel() exitCode, err := d.client.ContainerWait(ctx, container.ID) if err != nil { return fmt.Errorf("waiting for container %q to stop: %v", containerName, err) } if exitCode != 0 { return errors.NewContainerError(container.ID, exitCode, "") } // FIXME: If Stdout or Stderr can be closed, close it to notify that // there won't be any more writes. This is a hack to close the write // half of a pipe so that the read half sees io.EOF. // In particular, this is needed to eventually terminate code that runs // on OnStart and blocks reading from the pipe. if c, ok := opts.Stdout.(io.Closer); ok { c.Close() } if c, ok := opts.Stderr.(io.Closer); ok { c.Close() } // OnStart must be done before we move on. if opts.OnStart != nil { if err = <-onStartDone; err != nil { return err } } // Run PostExec hook if defined. if opts.PostExec != nil { glog.V(2).Infof("Invoking PostExecute function") if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil { return err } } select { case <-attachExit: return attachLoggingError case <-time.After(DefaultDockerTimeout): return nil } }) }
// RunContainer creates and starts a container using the image specified in opts
// with the ability to stream input and/or output.
//
// The container is always removed before this function returns (deferred).
// When opts.TargetImage is set the call blocks in runContainerDockerRun;
// otherwise it waits for the container to exit and reports a non-zero exit
// code via errors.NewContainerError.
func (d *stiDocker) RunContainer(opts RunContainerOptions) error {
	createOpts := opts.asDockerCreateContainerOptions()

	// get info about the specified image
	image := createOpts.Config.Image
	var (
		imageMetadata *docker.Image
		err           error
	)
	if opts.PullImage {
		imageMetadata, err = d.CheckAndPullImage(image)
	} else {
		imageMetadata, err = d.client.InspectImage(image)
	}
	if err != nil {
		glog.Errorf("Unable to get image metadata for %s: %v", image, err)
		return err
	}

	// tarDestination is later handed to PostExecute; cmd becomes the container command.
	cmd, tarDestination := runContainerTar(opts, imageMetadata)
	createOpts.Config.Cmd = cmd

	// Create a new container.
	glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...", createOpts.Name, createOpts.Config, createOpts.HostConfig)
	var container *docker.Container
	if err := util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to create container", func() error {
		var createErr error
		container, createErr = d.client.CreateContainer(createOpts)
		return createErr
	}); err != nil {
		return err
	}
	containerName := containerNameOrID(container)

	// Container was created, so we defer its removal.
	defer func() {
		glog.V(4).Infof("Removing container %q ...", containerName)
		if err := d.RemoveContainer(container.ID); err != nil {
			glog.Warningf("Failed to remove container %q: %v", containerName, err)
		} else {
			glog.V(4).Infof("Removed container %q", containerName)
		}
	}()

	// Attach to the container.
	// NOTE(review): attach deliberately happens before start so no early
	// container output is lost — keep this ordering.
	glog.V(2).Infof("Attaching to container %q ...", containerName)
	attachOpts := opts.asDockerAttachToContainerOptions()
	attachOpts.Container = container.ID
	if _, err = d.client.AttachToContainerNonBlocking(attachOpts); err != nil {
		glog.Errorf("Unable to attach to container %q with options %+v: %v", containerName, attachOpts, err)
		return err
	}

	// Start the container.
	glog.V(2).Infof("Starting container %q ...", containerName)
	if err := util.TimeoutAfter(DefaultDockerTimeout, "timeout after waiting %v for Docker to start container", func() error {
		return d.client.StartContainer(container.ID, nil)
	}); err != nil {
		return err
	}

	// Run OnStart hook if defined. OnStart might block, so we run it in a
	// new goroutine, and wait for it to be done later on.
	onStartDone := make(chan error, 1)
	if opts.OnStart != nil {
		go func() {
			onStartDone <- opts.OnStart(container.ID)
		}()
	}

	// We either block waiting for a user-originated SIGINT, or wait for the
	// container to terminate. When TargetImage is true, we're dealing with
	// an invocation of `s2i build ... --run` so this will, e.g., run a web
	// server and block until the user interrupts it. The other case is seen
	// when running the assemble script or other commands that are meant to
	// terminate in a finite amount of time.
	if opts.TargetImage {
		runContainerDockerRun(container, d, image)
	} else {
		// Return an error if the exit code of the container is
		// non-zero.
		glog.V(4).Infof("Waiting for container %q to stop ...", containerName)
		exitCode, err := d.client.WaitContainer(container.ID)
		if err != nil {
			return fmt.Errorf("waiting for container %q to stop: %v", containerName, err)
		}
		if exitCode != 0 {
			return errors.NewContainerError(container.Name, exitCode, "")
		}
	}

	// FIXME: If Stdout or Stderr can be closed, close it to notify that
	// there won't be any more writes. This is a hack to close the write
	// half of a pipe so that the read half sees io.EOF.
	// In particular, this is needed to eventually terminate code that runs
	// on OnStart and blocks reading from the pipe.
	if c, ok := opts.Stdout.(io.Closer); ok {
		c.Close()
	}
	if c, ok := opts.Stderr.(io.Closer); ok {
		c.Close()
	}

	// OnStart must be done before we move on.
	if opts.OnStart != nil {
		if err := <-onStartDone; err != nil {
			return err
		}
	}

	// Run PostExec hook if defined.
	if opts.PostExec != nil {
		glog.V(2).Infof("Invoking postExecution function")
		if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
			return err
		}
	}
	return nil
}
// RunContainer creates and starts a container using the image specified in opts
// with the ability to stream input and/or output.
//
// The container is always removed before this function returns (deferred).
// Unlike later revisions, the OnStart hook here runs synchronously before
// waiting on the container.
func (d *stiDocker) RunContainer(opts RunContainerOptions) error {
	createOpts := opts.asDockerCreateContainerOptions()

	// get info about the specified image
	image := createOpts.Config.Image
	var (
		imageMetadata *docker.Image
		err           error
	)
	if opts.PullImage {
		imageMetadata, err = d.CheckAndPullImage(image)
	} else {
		imageMetadata, err = d.client.InspectImage(image)
	}
	if err != nil {
		glog.Errorf("Unable to get image metadata for %s: %v", image, err)
		return err
	}

	// tarDestination is later handed to PostExecute; cmd becomes the container command.
	cmd, tarDestination := runContainerTar(opts, imageMetadata)
	createOpts.Config.Cmd = cmd

	// Create a new container.
	glog.V(2).Infof("Creating container with options {Name:%q Config:%+v HostConfig:%+v} ...", createOpts.Name, createOpts.Config, createOpts.HostConfig)
	var container *docker.Container
	if err := util.TimeoutAfter(DefaultDockerTimeout, func() error {
		var createErr error
		container, createErr = d.client.CreateContainer(createOpts)
		return createErr
	}); err != nil {
		return err
	}
	containerName := containerNameOrID(container)

	// Container was created, so we defer its removal.
	defer func() {
		glog.V(4).Infof("Removing container %q ...", containerName)
		if err := d.RemoveContainer(container.ID); err != nil {
			glog.Warningf("Failed to remove container %q: %v", containerName, err)
		} else {
			glog.V(4).Infof("Removed container %q", containerName)
		}
	}()

	// Attach to the container.
	// NOTE(review): attach deliberately happens before start so no early
	// container output is lost — keep this ordering.
	glog.V(2).Infof("Attaching to container %q ...", containerName)
	attachOpts := opts.asDockerAttachToContainerOptions()
	attachOpts.Container = container.ID
	if _, err = d.client.AttachToContainerNonBlocking(attachOpts); err != nil {
		glog.Errorf("Unable to attach to container %q with options %+v: %v", containerName, attachOpts, err)
		return err
	}

	// Start the container.
	glog.V(2).Infof("Starting container %q ...", containerName)
	if err := util.TimeoutAfter(DefaultDockerTimeout, func() error {
		return d.client.StartContainer(container.ID, nil)
	}); err != nil {
		return err
	}

	// Run OnStart hook if defined.
	// NOTE(review): OnStart runs synchronously here — if it blocks, so does
	// this function.
	if opts.OnStart != nil {
		if err = opts.OnStart(container.ID); err != nil {
			return err
		}
	}

	// We either block waiting for a user-originated SIGINT, or wait for the
	// container to terminate. When TargetImage is true, we're dealing with
	// an invocation of `s2i build ... --run` so this will, e.g., run a web
	// server and block until the user interrupts it. The other case is seen
	// when running the assemble script or other commands that are meant to
	// terminate in a finite amount of time.
	if opts.TargetImage {
		runContainerDockerRun(container, d, image)
	} else {
		// Return an error if the exit code of the container is
		// non-zero.
		glog.V(4).Infof("Waiting for container %q to stop ...", containerName)
		exitCode, err := d.client.WaitContainer(container.ID)
		if err != nil {
			return fmt.Errorf("waiting for container %q to stop: %v", containerName, err)
		}
		if exitCode != 0 {
			return errors.NewContainerError(container.Name, exitCode, "")
		}
	}

	// Run PostExec hook if defined.
	if opts.PostExec != nil {
		glog.V(2).Infof("Invoking postExecution function")
		if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
			return err
		}
	}
	return nil
}