Example #1
// Strategy creates the appropriate build strategy for the provided config, using
// the overrides provided. Not all strategies support all overrides.
func Strategy(config *api.Config, overrides build.Overrides) (build.Builder, api.BuildInfo, error) {
	var builder build.Builder
	var buildInfo api.BuildInfo

	image, err := docker.GetBuilderImage(config)
	if err != nil {
		buildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonPullBuilderImageFailed, utilstatus.ReasonMessagePullBuilderImageFailed)
		return nil, buildInfo, err
	}
	config.HasOnBuild = image.OnBuild

	// If the image declares ONBUILD instructions and we are not blocking
	// them, use the onbuild strategy; otherwise fall through to the normal
	// s2i flow, which performs no docker build and so never triggers the
	// ONBUILD commands.
	if image.OnBuild && !config.BlockOnBuild {
		builder, err = onbuild.New(config, overrides)
		if err != nil {
			buildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return nil, buildInfo, err
		}
		return builder, buildInfo, nil
	}

	builder, err = sti.New(config, overrides)
	if err != nil {
		buildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
		return nil, buildInfo, err
	}
	return builder, buildInfo, nil
}
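A minimal usage sketch of Strategy and the Builder it returns. The import paths, config fields, and image/repository values below are illustrative assumptions, not verified against a particular s2i release; per the Build doc comment in Example #3, the caller checks both the returned error and result.Success.

package main

import (
	"log"

	"github.com/openshift/source-to-image/pkg/api"
	"github.com/openshift/source-to-image/pkg/build"
	"github.com/openshift/source-to-image/pkg/build/strategies"
)

func main() {
	config := &api.Config{
		BuilderImage: "centos/ruby-25-centos7",            // assumed builder image
		Source:       "https://github.com/sclorg/ruby-ex", // assumed source repository
		Tag:          "ruby-ex:latest",
	}

	builder, buildInfo, err := strategies.Strategy(config, build.Overrides{})
	if err != nil {
		log.Fatalf("strategy selection failed (%+v): %v", buildInfo.FailureReason, err)
	}

	result, err := builder.Build(config)
	if err != nil || !result.Success {
		// err reports a failure running the build machinery; !Success
		// reports a failure of the build itself.
		log.Fatalf("build failed: %v", err)
	}
	log.Printf("built image %s", result.ImageID)
}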
Example #2
func (step *downloadFilesFromBuilderImageStep) execute(ctx *postExecutorStepContext) error {
	glog.V(3).Info("Executing step: download files from the builder image")

	artifactsDir := filepath.Join(step.builder.config.WorkingDir, api.RuntimeArtifactsDir)
	if err := step.fs.Mkdir(artifactsDir); err != nil {
		step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonFSOperationFailed,
			utilstatus.ReasonMessageFSOperationFailed,
		)
		return fmt.Errorf("could not create directory %q: %v", artifactsDir, err)
	}

	for _, artifact := range step.builder.config.RuntimeArtifacts {
		if err := step.downloadAndExtractFile(artifact.Source, artifactsDir, ctx.containerID); err != nil {
			step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonRuntimeArtifactsFetchFailed,
				utilstatus.ReasonMessageRuntimeArtifactsFetchFailed,
			)
			return err
		}

		// For a mapping like "/tmp/foo.txt -> app", create "app" and move "foo.txt" into it.
		dstSubDir := path.Clean(artifact.Destination)
		if dstSubDir != "." && dstSubDir != "/" {
			dstDir := filepath.Join(artifactsDir, dstSubDir)
			glog.V(5).Infof("Creating directory %q", dstDir)
			if err := step.fs.MkdirAll(dstDir); err != nil {
				step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
					utilstatus.ReasonFSOperationFailed,
					utilstatus.ReasonMessageFSOperationFailed,
				)
				return fmt.Errorf("could not create directory %q: %v", dstDir, err)
			}

			file := filepath.Base(artifact.Source)
			// oldPath/newPath avoid shadowing the predeclared identifier "new".
			oldPath := filepath.Join(artifactsDir, file)
			newPath := filepath.Join(artifactsDir, dstSubDir, file)
			glog.V(5).Infof("Renaming %q to %q", oldPath, newPath)
			if err := step.fs.Rename(oldPath, newPath); err != nil {
				step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
					utilstatus.ReasonFSOperationFailed,
					utilstatus.ReasonMessageFSOperationFailed,
				)
				return fmt.Errorf("could not rename %q -> %q: %v", oldPath, newPath, err)
			}
			}
		}
	}

	return nil
}
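To make the mapping comment concrete: for "/tmp/foo.txt -> app", the step creates "app" under the artifacts directory and moves "foo.txt" into it. A standard-library-only sketch of the same path arithmetic, with a hypothetical artifactsDir value:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	artifactsDir := "/working-dir/runtime-artifacts" // hypothetical
	source, destination := "/tmp/foo.txt", "app"

	dstSubDir := path.Clean(destination) // "app"
	file := filepath.Base(source)        // "foo.txt"
	oldPath := filepath.Join(artifactsDir, file)
	newPath := filepath.Join(artifactsDir, dstSubDir, file)

	// /working-dir/runtime-artifacts/foo.txt -> /working-dir/runtime-artifacts/app/foo.txt
	fmt.Printf("%s -> %s\n", oldPath, newPath)
}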
Example #3
// Build processes a Request and returns a *api.Result and an error.
// An error represents a failure performing the build rather than a failure
// of the build itself.  Callers should check the Success field of the result
// to determine whether a build succeeded or not.
func (builder *STI) Build(config *api.Config) (*api.Result, error) {
	builder.result = &api.Result{}

	defer builder.garbage.Cleanup(config)

	glog.V(1).Infof("Preparing to build %s", config.Tag)
	// The failure reason is updated inside the Prepare function.
	if err := builder.preparer.Prepare(config); err != nil {
		return builder.result, err
	}

	if builder.incremental = builder.artifacts.Exists(config); builder.incremental {
		tag := firstNonEmpty(config.IncrementalFromTag, config.Tag)
		glog.V(1).Infof("Existing image for tag %s detected for incremental build", tag)
	} else {
		glog.V(1).Info("Clean build will be performed")
	}

	glog.V(2).Infof("Performing source build from %s", config.Source)
	if builder.incremental {
		if err := builder.artifacts.Save(config); err != nil {
			glog.Warning("Clean build will be performed because of error saving previous build artifacts")
			glog.V(2).Infof("error: %v", err)
		}
	}

	if len(config.AssembleUser) > 0 {
		glog.V(1).Infof("Running %q in %q as %q user", api.Assemble, config.Tag, config.AssembleUser)
	} else {
		glog.V(1).Infof("Running %q in %q", api.Assemble, config.Tag)
	}
	if err := builder.scripts.Execute(api.Assemble, config.AssembleUser, config); err != nil {
		builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonAssembleFailed, utilstatus.ReasonMessageAssembleFailed)

		switch e := err.(type) {
		case errors.ContainerError:
			if !isMissingRequirements(e.Output) {
				builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonUnmetS2IDependencies, utilstatus.ReasonMessageUnmetS2IDependencies)
				return builder.result, err
			}
			glog.V(1).Info("Image is missing basic requirements (sh or tar), layered build will be performed")
			return builder.layered.Build(config)
		default:
			return builder.result, err
		}
	}
	builder.result.Success = true

	return builder.result, nil
}
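firstNonEmpty is not among these excerpts; the following minimal implementation is offered purely as an assumption, consistent with how Build, Exists, and Save use it to prefer config.IncrementalFromTag over config.Tag.

package main

import "fmt"

// firstNonEmpty (assumed helper) returns the first non-empty string among
// its arguments, or "" if all are empty.
func firstNonEmpty(args ...string) string {
	for _, s := range args {
		if s != "" {
			return s
		}
	}
	return ""
}

func main() {
	fmt.Println(firstNonEmpty("", "app:latest"))       // app:latest
	fmt.Println(firstNonEmpty("prev:1", "app:latest")) // prev:1
}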
Example #4
func (step *commitImageStep) execute(ctx *postExecutorStepContext) error {
	glog.V(3).Infof("Executing step: commit image")

	user, err := step.docker.GetImageUser(step.image)
	if err != nil {
		return fmt.Errorf("Couldn't get user of %q image: %v", step.image, err)
	}

	cmd := createCommandForExecutingRunScript(step.builder.scriptsURL, ctx.destination)

	ctx.labels = createLabelsForResultingImage(step.builder, step.docker, step.image)

	// Set the image entrypoint back to its original value on commit; the
	// running container has "env" as its entrypoint, and we don't want to
	// commit that.
	entrypoint, err := step.docker.GetImageEntrypoint(step.image)
	if err != nil {
		return fmt.Errorf("Couldn't get entrypoint of %q image: %v", step.image, err)
	}
	// If the image has no explicit entrypoint, set it to an empty array
	// so we don't default to leaving the entrypoint as "env" upon commit.
	if entrypoint == nil {
		entrypoint = []string{}
	}

	ctx.imageID, err = commitContainer(step.docker, ctx.containerID, cmd, user, step.builder.config.Tag, step.builder.env, entrypoint, ctx.labels)
	if err != nil {
		step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonCommitContainerFailed, utilstatus.ReasonMessageCommitContainerFailed)
		return err
	}

	return nil
}
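The nil check on the entrypoint turns on Go's distinction between a nil slice (no explicit entrypoint, which would leave the container's "env" in place on commit) and an empty non-nil slice (an explicit reset). A runnable sketch of that distinction:

package main

import "fmt"

func main() {
	var inherited []string // nil: no explicit entrypoint was set
	cleared := []string{}  // empty but non-nil: explicitly cleared

	fmt.Println(inherited == nil, len(inherited)) // true 0
	fmt.Println(cleared == nil, len(cleared))     // false 0
}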
Example #5
func (step *downloadFilesFromBuilderImageStep) downloadAndExtractFile(artifactPath, artifactsDir, containerID string) error {
	glog.V(5).Infof("Downloading file %q", artifactPath)

	fd, err := ioutil.TempFile(artifactsDir, "s2i-runtime-artifact")
	if err != nil {
		step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonFSOperationFailed,
			utilstatus.ReasonMessageFSOperationFailed,
		)
		return fmt.Errorf("could not create temporary file for runtime artifact: %v", err)
	}
	defer func() {
		fd.Close()
		os.Remove(fd.Name())
	}()

	if err := step.docker.DownloadFromContainer(artifactPath, fd, containerID); err != nil {
		return fmt.Errorf("could not download file (%q -> %q) from container %s: %v", artifactPath, fd.Name(), containerID, err)
	}

	// After writing to the file descriptor, rewind to the beginning of the
	// file before reading from it again.
	if _, err := fd.Seek(0, os.SEEK_SET); err != nil {
		step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonGenericS2IBuildFailed,
			utilstatus.ReasonMessageGenericS2iBuildFailed,
		)
		return fmt.Errorf("could not seek to the beginning of the file %q: %v", fd.Name(), err)
	}

	if err := step.tar.ExtractTarStream(artifactsDir, fd); err != nil {
		step.builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonGenericS2IBuildFailed,
			utilstatus.ReasonMessageGenericS2iBuildFailed,
		)
		return fmt.Errorf("could not extract runtime artifact %q into the directory %q: %v", artifactPath, artifactsDir, err)
	}

	return nil
}
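The rewind before ExtractTarStream is essential: after DownloadFromContainer writes the archive, the file offset sits at the end, so reading without a seek yields immediate EOF. A standard-library sketch of the same write-rewind-read cycle (io.SeekStart is the modern spelling of os.SEEK_SET):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	fd, err := os.CreateTemp("", "s2i-demo") // modern replacement for ioutil.TempFile
	if err != nil {
		panic(err)
	}
	defer os.Remove(fd.Name())
	defer fd.Close()

	fd.WriteString("archive bytes")

	// Without this rewind, the read below would see immediate EOF:
	// the offset is still at the end of what was just written.
	if _, err := fd.Seek(0, io.SeekStart); err != nil { // io.SeekStart == os.SEEK_SET
		panic(err)
	}

	data, _ := io.ReadAll(fd)
	fmt.Printf("read %d bytes\n", len(data)) // read 13 bytes
}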
Example #6
// Exists determines if the current build supports incremental workflow.
// It checks whether the previous image exists in the system and, if so,
// verifies that the save-artifacts script is present.
func (builder *STI) Exists(config *api.Config) bool {
	if !config.Incremental {
		return false
	}

	policy := config.PreviousImagePullPolicy
	if len(policy) == 0 {
		policy = api.DefaultPreviousImagePullPolicy
	}

	tag := firstNonEmpty(config.IncrementalFromTag, config.Tag)

	result, err := dockerpkg.PullImage(tag, builder.incrementalDocker, policy, false)
	if err != nil {
		builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonPullPreviousImageFailed,
			utilstatus.ReasonMessagePullPreviousImageFailed,
		)
		glog.V(2).Infof("Unable to pull previously built image %q: %v", tag, err)
		return false
	}

	return result.Image != nil && builder.installedScripts[api.SaveArtifacts]
}
Example #7
// Build handles the `docker build` equivalent execution, returning the
// success/failure details.
func (builder *Layered) Build(config *api.Config) (*api.Result, error) {
	buildResult := &api.Result{}

	if config.HasOnBuild && config.BlockOnBuild {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonOnBuildForbidden,
			utilstatus.ReasonMessageOnBuildForbidden,
		)
		return buildResult, errors.New("builder image uses ONBUILD instructions but ONBUILD is not allowed")
	}

	if config.BuilderImage == "" {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonGenericS2IBuildFailed,
			utilstatus.ReasonMessageGenericS2iBuildFailed,
		)
		return buildResult, errors.New("builder image name cannot be empty")
	}

	if err := builder.CreateDockerfile(config); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonDockerfileCreateFailed,
			utilstatus.ReasonMessageDockerfileCreateFailed,
		)
		return buildResult, err
	}

	glog.V(2).Info("Creating application source code image")
	tarStream := builder.tar.CreateTarStreamReader(filepath.Join(config.WorkingDir, "upload"), false)
	defer tarStream.Close()

	newBuilderImage := fmt.Sprintf("s2i-layered-temp-image-%d", time.Now().UnixNano())

	outReader, outWriter := io.Pipe()
	opts := docker.BuildImageOptions{
		Name:         newBuilderImage,
		Stdin:        tarStream,
		Stdout:       outWriter,
		CGroupLimits: config.CGroupLimits,
	}
	docker.StreamContainerIO(outReader, nil, func(s string) { glog.V(2).Info(s) })

	glog.V(2).Infof("Building new image %s with scripts and sources already inside", newBuilderImage)
	if err := builder.docker.BuildImage(opts); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonDockerImageBuildFailed,
			utilstatus.ReasonMessageDockerImageBuildFailed,
		)
		return buildResult, err
	}

	// Upon successful build, update the current config:
	builder.config.LayeredBuild = true
	// record the new builder image name,
	builder.config.BuilderImage = newBuilderImage
	// and resolve the scripts location (see CreateDockerfile: the scripts
	// directory is copied into the image only when it has contents).
	scriptsIncluded := checkValidDirWithContents(path.Join(config.WorkingDir, api.UploadScripts))
	glog.V(2).Infof("Scripts dir has contents %v", scriptsIncluded)
	if scriptsIncluded {
		builder.config.ScriptsURL = "image://" + path.Join(getDestination(config), "scripts")
	} else {
		var err error
		builder.config.ScriptsURL, err = builder.docker.GetScriptsURL(newBuilderImage)
		if err != nil {
			buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonGenericS2IBuildFailed,
				utilstatus.ReasonMessageGenericS2iBuildFailed,
			)
			return buildResult, err
		}
	}

	glog.V(2).Infof("Building %s using sti-enabled image", builder.config.Tag)
	if err := builder.scripts.Execute(api.Assemble, config.AssembleUser, builder.config); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonAssembleFailed,
			utilstatus.ReasonMessageAssembleFailed,
		)
		switch e := err.(type) {
		case s2ierr.ContainerError:
			return buildResult, s2ierr.NewAssembleError(builder.config.Tag, e.Output, e)
		default:
			return buildResult, err
		}
	}
	buildResult.Success = true

	return buildResult, nil
}
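The outReader/outWriter pair above is a plain io.Pipe that forwards the docker build log to glog. A self-contained sketch of that producer/consumer pattern, with fmt standing in for both docker and glog:

package main

import (
	"bufio"
	"fmt"
	"io"
)

func main() {
	outReader, outWriter := io.Pipe()

	// Consumer: mirrors StreamContainerIO, reading lines until the writer closes.
	done := make(chan struct{})
	go func() {
		defer close(done)
		scanner := bufio.NewScanner(outReader)
		for scanner.Scan() {
			fmt.Println("log:", scanner.Text())
		}
	}()

	// Producer: stands in for the docker build writing its output.
	fmt.Fprintln(outWriter, "Step 1/3 : FROM builder")
	fmt.Fprintln(outWriter, "Step 2/3 : COPY upload /tmp/src")
	outWriter.Close() // signals EOF to the consumer
	<-done
}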
Example #8
// Execute runs the specified STI script in the builder image.
func (builder *STI) Execute(command string, user string, config *api.Config) error {
	glog.V(2).Infof("Using image name %s", config.BuilderImage)

	// This can't be invoked earlier (for example in New()) because
	// config.WorkingDir is initialized later.
	builder.env = createBuildEnvironment(config)

	errOutput := ""
	outReader, outWriter := io.Pipe()
	errReader, errWriter := io.Pipe()
	defer outReader.Close()
	defer outWriter.Close()
	defer errReader.Close()
	defer errWriter.Close()
	externalScripts := builder.externalScripts[command]
	// In a layered build, all scripts are already inside the image, so they are not external.
	if config.LayeredBuild {
		externalScripts = false
	}

	opts := dockerpkg.RunContainerOptions{
		Image:  config.BuilderImage,
		Stdout: outWriter,
		Stderr: errWriter,
		// PullImage is false because the image should already have been
		// pulled before the container is run.
		PullImage:       false,
		ExternalScripts: externalScripts,
		ScriptsURL:      config.ScriptsURL,
		Destination:     config.Destination,
		Command:         command,
		Env:             builder.env,
		User:            user,
		PostExec:        builder.postExecutor,
		NetworkMode:     string(config.DockerNetworkMode),
		CGroupLimits:    config.CGroupLimits,
		CapDrop:         config.DropCapabilities,
		Binds:           config.BuildVolumes.AsBinds(),
	}

	// If there are injections specified, override the original assemble script
	// and wait until all injections are uploaded into the container that runs the
	// assemble script.
	injectionComplete := make(chan struct{})
	var injectionError error
	if len(config.Injections) > 0 && command == api.Assemble {
		workdir, err := builder.docker.GetImageWorkdir(config.BuilderImage)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return err
		}
		config.Injections = util.FixInjectionsWithRelativePath(workdir, config.Injections)
		injectedFiles, err := util.ExpandInjectedFiles(config.Injections)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonInstallScriptsFailed, utilstatus.ReasonMessageInstallScriptsFailed)
			return err
		}
		rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections")
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return err
		}
		defer os.Remove(rmScript)
		opts.CommandOverrides = func(cmd string) string {
			return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
				"/tmp/rm-injections", cmd)
		}
		originalOnStart := opts.OnStart
		opts.OnStart = func(containerID string) error {
			defer close(injectionComplete)
			if err != nil {
				injectionError = err
				return err
			}
			glog.V(2).Info("starting the injections uploading ...")
			for _, s := range config.Injections {
				if err := builder.docker.UploadToContainer(s.Source, s.Destination, containerID); err != nil {
					injectionError = util.HandleInjectionError(s, err)
					return err
				}
			}
			if err := builder.docker.UploadToContainer(rmScript, "/tmp/rm-injections", containerID); err != nil {
				injectionError = util.HandleInjectionError(api.VolumeSpec{Source: rmScript, Destination: "/tmp/rm-injections"}, err)
				return err
			}
			if originalOnStart != nil {
				return originalOnStart(containerID)
			}
			return nil
		}
	} else {
		close(injectionComplete)
	}

	wg := sync.WaitGroup{}
	if !config.LayeredBuild {
		wg.Add(1)
		uploadDir := filepath.Join(config.WorkingDir, "upload")
		// TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
		r, w := io.Pipe()
		go func() {
			// Reminder: deferred calls run in LIFO (stack) order.
			defer wg.Done()
			// Wait for the injections to complete and check the error. Do not start
			// streaming the sources when the injection failed.
			<-injectionComplete
			if injectionError != nil {
				return
			}
			glog.V(2).Info("starting the source uploading ...")
			var err error
			defer func() {
				w.CloseWithError(err)
				if r := recover(); r != nil {
					glog.Errorf("recovered panic: %#v", r)
				}
			}()
			err = builder.tar.CreateTarStream(uploadDir, false, w)
		}()

		opts.Stdin = r
	}

	go func(reader io.Reader) {
		scanner := bufio.NewReader(reader)
		// Precede build output with newline
		glog.Info()
		for {
			text, err := scanner.ReadString('\n')
			if err != nil {
				// ErrClosedPipe and EOF are ignored; they just mean the
				// docker container finished streaming its logs.
				if glog.Is(2) && err != io.ErrClosedPipe && err != io.EOF {
					glog.Errorf("Error reading docker stdout, %#v", err)
				}
				break
			}
			// Nothing is printed when the quiet option is set
			if config.Quiet {
				continue
			}
			glog.Info(strings.TrimSpace(text))
		}
		// Terminate build output with a newline
		glog.Info()

	}(outReader)

	go dockerpkg.StreamContainerIO(errReader, &errOutput, func(a ...interface{}) { glog.Info(a...) })

	err := builder.docker.RunContainer(opts)
	if e, ok := err.(errors.ContainerError); ok {
		// Even with the deferred close above, close errReader now to avoid a
		// data race on errOutput; closing makes StreamContainerIO exit and
		// release the writer.
		errReader.Close()
		return errors.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
	}
	// Do not wait for source input if the container times out.
	// FIXME: this potentially leaks a goroutine.
	if !util.IsTimeoutError(err) {
		wg.Wait()
	}
	return err
}
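The CommandOverrides wrapper is easiest to understand by printing what it produces; note the %[1]s indexed verb, which reuses the first argument ("/tmp/rm-injections"). A runnable sketch with a hypothetical assemble path standing in for the original command:

package main

import "fmt"

func main() {
	cmd := "/tmp/scripts/assemble" // hypothetical original command
	wrapped := fmt.Sprintf(
		"while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
		"/tmp/rm-injections", cmd)

	// The container blocks until the removal script has been uploaded, runs
	// assemble, sources the script to delete the injected files, and exits
	// with assemble's status.
	fmt.Println(wrapped)
}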
Example #9
// Save extracts and restores the build artifacts from the previous build
// into the current build.
func (builder *STI) Save(config *api.Config) (err error) {
	artifactTmpDir := filepath.Join(config.WorkingDir, "upload", "artifacts")
	if builder.result == nil {
		builder.result = &api.Result{}
	}

	if err = builder.fs.Mkdir(artifactTmpDir); err != nil {
		builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonFSOperationFailed, utilstatus.ReasonMessageFSOperationFailed)
		return err
	}

	image := firstNonEmpty(config.IncrementalFromTag, config.Tag)

	outReader, outWriter := io.Pipe()
	defer outReader.Close()
	defer outWriter.Close()
	errReader, errWriter := io.Pipe()
	defer errReader.Close()
	defer errWriter.Close()
	glog.V(1).Infof("Saving build artifacts from image %s to path %s", image, artifactTmpDir)
	extractFunc := func(string) error {
		return builder.tar.ExtractTarStream(artifactTmpDir, outReader)
	}

	user := config.AssembleUser
	if len(user) == 0 {
		user, err = builder.docker.GetImageUser(image)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return err
		}
		glog.V(3).Infof("The assemble user is not set, defaulting to %q user", user)
	} else {
		glog.V(3).Infof("Using assemble user %q to extract artifacts", user)
	}

	opts := dockerpkg.RunContainerOptions{
		Image:           image,
		User:            user,
		ExternalScripts: builder.externalScripts[api.SaveArtifacts],
		ScriptsURL:      config.ScriptsURL,
		Destination:     config.Destination,
		PullImage:       false,
		Command:         api.SaveArtifacts,
		Stdout:          outWriter,
		Stderr:          errWriter,
		OnStart:         extractFunc,
		NetworkMode:     string(config.DockerNetworkMode),
		CGroupLimits:    config.CGroupLimits,
		CapDrop:         config.DropCapabilities,
	}

	go dockerpkg.StreamContainerIO(errReader, nil, func(a ...interface{}) { glog.Info(a...) })
	err = builder.docker.RunContainer(opts)
	if e, ok := err.(errors.ContainerError); ok {
		// Even with the deferred close above, close errReader now to avoid a
		// data race on errOutput; closing makes StreamContainerIO exit and
		// release the writer.
		errReader.Close()
		err = errors.NewSaveArtifactsError(image, e.Output, err)
	}

	// Record a generic failure reason only when the save actually failed.
	if err != nil {
		builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
	}
	return err
}
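The err.(errors.ContainerError) type assertion is the recurring pattern these excerpts use to enrich container failures with context. A standalone sketch of the idiom; ContainerError here is a simplified local stand-in, not the s2i type:

package main

import "fmt"

// ContainerError is a simplified stand-in for the s2i errors.ContainerError type.
type ContainerError struct {
	ErrorCode int
	Output    string
}

func (e ContainerError) Error() string {
	return fmt.Sprintf("container exited with code %d", e.ErrorCode)
}

func run() error {
	return ContainerError{ErrorCode: 1, Output: "save-artifacts: not found"}
}

func main() {
	err := run()
	if e, ok := err.(ContainerError); ok {
		// Wrap with context, in the spirit of NewSaveArtifactsError above.
		err = fmt.Errorf("saving artifacts failed: %s: %w", e.Output, e)
	}
	fmt.Println(err)
}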
Example #10
// Prepare prepares the source code and tar for build.
// NOTE: this func serves both the sti and onbuild strategies, since the
// OnBuild struct's Build func calls this STI Prepare func directly.
func (builder *STI) Prepare(config *api.Config) error {
	var err error
	if builder.result == nil {
		builder.result = &api.Result{}
	}

	if len(config.WorkingDir) == 0 {
		if config.WorkingDir, err = builder.fs.CreateWorkingDirectory(); err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonFSOperationFailed, utilstatus.ReasonMessageFSOperationFailed)
			return err
		}
	}

	builder.result.WorkingDir = config.WorkingDir

	if len(config.RuntimeImage) > 0 {
		if err = dockerpkg.GetRuntimeImage(config, builder.runtimeDocker); err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonPullRuntimeImageFailed, utilstatus.ReasonMessagePullRuntimeImageFailed)
			glog.Errorf("Unable to pull runtime image %q: %v", config.RuntimeImage, err)
			return err
		}

		// The user didn't specify a mapping, so take it from the runtime image.
		if len(builder.config.RuntimeArtifacts) == 0 {
			mapping, err := builder.docker.GetAssembleInputFiles(config.RuntimeImage)
			if err != nil {
				builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonInvalidArtifactsMapping, utilstatus.ReasonMessageInvalidArtifactsMapping)
				return err
			}
			if len(mapping) == 0 {
				builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
				return fmt.Errorf("No runtime artifacts to copy were specified")
			}
			for _, value := range strings.Split(mapping, ";") {
				if err = builder.config.RuntimeArtifacts.Set(value); err != nil {
					builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
					return fmt.Errorf("Couldn't parse %q label with value %q on image %q: %v",
						dockerpkg.AssembleInputFilesLabel, mapping, config.RuntimeImage, err)
				}
			}
		}
		// Validate the values here to be sure both invocation paths are
		// handled: from main() and as a method call from OpenShift.
		for _, volumeSpec := range builder.config.RuntimeArtifacts {
			var volumeErr error

			switch {
			case !path.IsAbs(volumeSpec.Source):
				volumeErr = fmt.Errorf("invalid runtime artifacts mapping: %q -> %q: source must be an absolute path", volumeSpec.Source, volumeSpec.Destination)
			case path.IsAbs(volumeSpec.Destination):
				volumeErr = fmt.Errorf("invalid runtime artifacts mapping: %q -> %q: destination must be a relative path", volumeSpec.Source, volumeSpec.Destination)
			case strings.HasPrefix(volumeSpec.Destination, ".."):
				volumeErr = fmt.Errorf("invalid runtime artifacts mapping: %q -> %q: destination cannot start with '..'", volumeSpec.Source, volumeSpec.Destination)
			default:
				continue
			}
			if volumeErr != nil {
				builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonInvalidArtifactsMapping, utilstatus.ReasonMessageInvalidArtifactsMapping)
				return volumeErr
			}
		}
	}

	// Set up the working directories
	for _, v := range workingDirs {
		if err = builder.fs.MkdirAll(filepath.Join(config.WorkingDir, v)); err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonFSOperationFailed, utilstatus.ReasonMessageFSOperationFailed)
			return err
		}
	}

	// Fetch the sources; their .s2i/bin may contain s2i scripts.
	if len(config.Source) > 0 {
		if builder.sourceInfo, err = builder.source.Download(config); err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonFetchSourceFailed, utilstatus.ReasonMessageFetchSourceFailed)
			return err
		}
	}

	// get the scripts
	required, err := builder.installer.InstallRequired(builder.requiredScripts, config.WorkingDir)
	if err != nil {
		builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonInstallScriptsFailed, utilstatus.ReasonMessageInstallScriptsFailed)
		return err
	}
	optional := builder.installer.InstallOptional(builder.optionalScripts, config.WorkingDir)

	requiredAndOptional := append(required, optional...)

	if len(config.RuntimeImage) > 0 && builder.runtimeInstaller != nil {
		optionalRuntime := builder.runtimeInstaller.InstallOptional(builder.optionalRuntimeScripts, config.WorkingDir)
		requiredAndOptional = append(requiredAndOptional, optionalRuntime...)
	}

	// If a ScriptsURL was specified but no scripts could be downloaded from it, return an error
	if len(config.ScriptsURL) > 0 {
		failedCount := 0
		for _, result := range requiredAndOptional {
			if includes(result.FailedSources, scripts.ScriptURLHandler) {
				failedCount++
			}
		}
		if failedCount == len(requiredAndOptional) {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonArtifactsFetchFailed, utilstatus.ReasonMessageArtifactsFetchFailed)
			return fmt.Errorf("Could not download any scripts from URL %v", config.ScriptsURL)
		}
	}

	for _, r := range requiredAndOptional {
		if r.Error != nil {
			glog.Warningf("Error getting %v from %s: %v", r.Script, r.URL, r.Error)
			continue
		}

		builder.externalScripts[r.Script] = r.Downloaded
		builder.installedScripts[r.Script] = r.Installed
		builder.scriptsURL[r.Script] = r.URL
	}

	// If a .s2iignore file exists, read its patterns and delete any files
	// that match them.
	return builder.ignorer.Ignore(config)
}
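The three mapping rules enforced above (absolute source, relative destination, no ".." escape) can be exercised in isolation. In this sketch, VolumeSpec is a local stand-in for api.VolumeSpec:

package main

import (
	"fmt"
	"path"
	"strings"
)

// VolumeSpec is a local stand-in for api.VolumeSpec.
type VolumeSpec struct{ Source, Destination string }

// validateMapping mirrors the checks in Prepare above.
func validateMapping(v VolumeSpec) error {
	switch {
	case !path.IsAbs(v.Source):
		return fmt.Errorf("invalid runtime artifacts mapping: %q -> %q: source must be an absolute path", v.Source, v.Destination)
	case path.IsAbs(v.Destination):
		return fmt.Errorf("invalid runtime artifacts mapping: %q -> %q: destination must be a relative path", v.Source, v.Destination)
	case strings.HasPrefix(v.Destination, ".."):
		return fmt.Errorf("invalid runtime artifacts mapping: %q -> %q: destination cannot start with '..'", v.Source, v.Destination)
	}
	return nil
}

func main() {
	fmt.Println(validateMapping(VolumeSpec{"/opt/app.jar", "deploy"})) // <nil>
	fmt.Println(validateMapping(VolumeSpec{"app.jar", "deploy"}))      // source must be absolute
	fmt.Println(validateMapping(VolumeSpec{"/opt/app.jar", "../etc"})) // destination cannot start with '..'
}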
Example #11
// Build executes an ONBUILD-style build.
func (builder *OnBuild) Build(config *api.Config) (*api.Result, error) {
	buildResult := &api.Result{}

	if config.BlockOnBuild {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonOnBuildForbidden, utilstatus.ReasonMessageOnBuildForbidden)
		return buildResult, fmt.Errorf("builder image uses ONBUILD instructions but ONBUILD is not allowed")
	}
	glog.V(2).Info("Preparing the source code for build")
	// Change the installation directory for this config to store scripts inside
	// the application root directory.
	if err := builder.source.Prepare(config); err != nil {
		return buildResult, err
	}

	// If necessary, copy the STI scripts into the application root directory
	builder.copySTIScripts(config)

	glog.V(2).Info("Creating application Dockerfile")
	if err := builder.CreateDockerfile(config); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonDockerfileCreateFailed, utilstatus.ReasonMessageDockerfileCreateFailed)
		return buildResult, err
	}

	glog.V(2).Info("Creating application source code image")
	tarStream, err := builder.SourceTar(config)
	if err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonTarSourceFailed, utilstatus.ReasonMessageTarSourceFailed)
		return buildResult, err
	}
	defer tarStream.Close()

	opts := docker.BuildImageOptions{
		Name:         config.Tag,
		Stdin:        tarStream,
		Stdout:       os.Stdout,
		CGroupLimits: config.CGroupLimits,
	}

	glog.V(2).Info("Building the application source")
	if err = builder.docker.BuildImage(opts); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonDockerImageBuildFailed, utilstatus.ReasonMessageDockerImageBuildFailed)
		return buildResult, err
	}

	glog.V(2).Info("Cleaning up temporary containers")
	builder.garbage.Cleanup(config)

	var imageID string

	if len(opts.Name) > 0 {
		if imageID, err = builder.docker.GetImageID(opts.Name); err != nil {
			buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return buildResult, err
		}
	}

	return &api.Result{
		Success:    true,
		WorkingDir: config.WorkingDir,
		ImageID:    imageID,
	}, nil
}
Example #12
// Build handles the `docker build` equivalent execution, returning the
// success/failure details.
func (builder *Layered) Build(config *api.Config) (*api.Result, error) {
	buildResult := &api.Result{}

	if config.HasOnBuild && config.BlockOnBuild {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonOnBuildForbidden, utilstatus.ReasonMessageOnBuildForbidden)
		return buildResult, fmt.Errorf("builder image uses ONBUILD instructions but ONBUILD is not allowed")
	}

	if config.BuilderImage == "" {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
		return buildResult, fmt.Errorf("builder image name cannot be empty")
	}

	if err := builder.CreateDockerfile(config); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonDockerfileCreateFailed, utilstatus.ReasonMessageDockerfileCreateFailed)
		return buildResult, err
	}

	glog.V(2).Info("Creating application source code image")
	tarStream, err := builder.SourceTar(config)
	if err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonTarSourceFailed, utilstatus.ReasonMessageTarSourceFailed)
		return buildResult, err
	}
	defer tarStream.Close()

	newBuilderImage := fmt.Sprintf("s2i-layered-temp-image-%d", time.Now().UnixNano())

	outReader, outWriter := io.Pipe()
	defer outReader.Close()
	defer outWriter.Close()
	opts := docker.BuildImageOptions{
		Name:         newBuilderImage,
		Stdin:        tarStream,
		Stdout:       outWriter,
		CGroupLimits: config.CGroupLimits,
	}
	// goroutine to stream container's output
	go func(reader io.Reader) {
		scanner := bufio.NewReader(reader)
		for {
			text, err := scanner.ReadString('\n')
			if err != nil {
				// ErrClosedPipe and EOF are ignored; they just mean the
				// docker container finished streaming its logs.
				if glog.Is(2) && err != io.ErrClosedPipe && err != io.EOF {
					glog.Errorf("Error reading docker stdout, %v", err)
				}
				break
			}
			glog.V(2).Info(text)
		}
	}(outReader)

	glog.V(2).Infof("Building new image %s with scripts and sources already inside", newBuilderImage)
	if err = builder.docker.BuildImage(opts); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonDockerImageBuildFailed, utilstatus.ReasonMessageDockerImageBuildFailed)
		return buildResult, err
	}

	// Upon successful build, update the current config:
	builder.config.LayeredBuild = true
	// record the new builder image name,
	builder.config.BuilderImage = newBuilderImage
	// and resolve the scripts location (see CreateDockerfile: the scripts
	// directory is copied into the image only when it has contents).
	scriptsIncluded := checkValidDirWithContents(path.Join(config.WorkingDir, api.UploadScripts))
	glog.V(2).Infof("Scripts dir has contents %v", scriptsIncluded)
	if scriptsIncluded {
		builder.config.ScriptsURL = "image://" + path.Join(getDestination(config), "scripts")
	} else {
		builder.config.ScriptsURL, err = builder.docker.GetScriptsURL(newBuilderImage)
		if err != nil {
			buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonGenericS2IBuildFailed, utilstatus.ReasonMessageGenericS2iBuildFailed)
			return buildResult, err
		}
	}

	glog.V(2).Infof("Building %s using sti-enabled image", builder.config.Tag)
	if err := builder.scripts.Execute(api.Assemble, config.AssembleUser, builder.config); err != nil {
		buildResult.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonAssembleFailed, utilstatus.ReasonMessageAssembleFailed)
		switch e := err.(type) {
		case errors.ContainerError:
			return buildResult, errors.NewAssembleError(builder.config.Tag, e.Output, e)
		default:
			return buildResult, err
		}
	}
	buildResult.Success = true

	return buildResult, nil
}
Example #13
// Execute runs the specified STI script in the builder image.
func (builder *STI) Execute(command string, user string, config *api.Config) error {
	glog.V(2).Infof("Using image name %s", config.BuilderImage)

	// This can't be invoked earlier (for example in New()) because
	// config.WorkingDir is initialized later.
	builder.env = createBuildEnvironment(config)

	errOutput := ""
	outReader, outWriter := io.Pipe()
	errReader, errWriter := io.Pipe()
	externalScripts := builder.externalScripts[command]
	// In a layered build, all scripts are already inside the image, so they are not external.
	if config.LayeredBuild {
		externalScripts = false
	}

	opts := dockerpkg.RunContainerOptions{
		Image:  config.BuilderImage,
		Stdout: outWriter,
		Stderr: errWriter,
		// PullImage is false because the image should already have been
		// pulled before the container is run.
		PullImage:       false,
		ExternalScripts: externalScripts,
		ScriptsURL:      config.ScriptsURL,
		Destination:     config.Destination,
		Command:         command,
		Env:             builder.env,
		User:            user,
		PostExec:        builder.postExecutor,
		NetworkMode:     string(config.DockerNetworkMode),
		CGroupLimits:    config.CGroupLimits,
		CapDrop:         config.DropCapabilities,
		Binds:           config.BuildVolumes.AsBinds(),
	}

	// If there are injections specified, override the original assemble script
	// and wait until all injections are uploaded into the container that runs the
	// assemble script.
	injectionError := make(chan error)
	if len(config.Injections) > 0 && command == api.Assemble {
		workdir, err := builder.docker.GetImageWorkdir(config.BuilderImage)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonGenericS2IBuildFailed,
				utilstatus.ReasonMessageGenericS2iBuildFailed,
			)
			return err
		}
		config.Injections = util.FixInjectionsWithRelativePath(workdir, config.Injections)
		injectedFiles, err := util.ExpandInjectedFiles(builder.fs, config.Injections)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonInstallScriptsFailed,
				utilstatus.ReasonMessageInstallScriptsFailed,
			)
			return err
		}
		rmScript, err := util.CreateInjectedFilesRemovalScript(injectedFiles, "/tmp/rm-injections")
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonGenericS2IBuildFailed,
				utilstatus.ReasonMessageGenericS2iBuildFailed,
			)
			return err
		}
		defer os.Remove(rmScript)
		opts.CommandOverrides = func(cmd string) string {
			return fmt.Sprintf("while [ ! -f %q ]; do sleep 0.5; done; %s; result=$?; source %[1]s; exit $result",
				"/tmp/rm-injections", cmd)
		}
		originalOnStart := opts.OnStart
		opts.OnStart = func(containerID string) error {
			defer close(injectionError)
			glog.V(2).Info("starting the injections uploading ...")
			for _, s := range config.Injections {
				if err := builder.docker.UploadToContainer(builder.fs, s.Source, s.Destination, containerID); err != nil {
					injectionError <- util.HandleInjectionError(s, err)
					return err
				}
			}
			if err := builder.docker.UploadToContainer(builder.fs, rmScript, "/tmp/rm-injections", containerID); err != nil {
				injectionError <- util.HandleInjectionError(api.VolumeSpec{Source: rmScript, Destination: "/tmp/rm-injections"}, err)
				return err
			}
			if originalOnStart != nil {
				return originalOnStart(containerID)
			}
			return nil
		}
	} else {
		close(injectionError)
	}

	if !config.LayeredBuild {
		r, w := io.Pipe()
		opts.Stdin = r

		go func() {
			// Wait for the injections to complete and check the error. Do not start
			// streaming the sources when the injection failed.
			if <-injectionError != nil {
				w.Close()
				return
			}
			glog.V(2).Info("starting the source uploading ...")
			uploadDir := filepath.Join(config.WorkingDir, "upload")
			w.CloseWithError(builder.tar.CreateTarStream(uploadDir, false, w))
		}()
	}

	dockerpkg.StreamContainerIO(outReader, nil, func(s string) {
		if !config.Quiet {
			glog.Info(strings.TrimSpace(s))
		}
	})

	c := dockerpkg.StreamContainerIO(errReader, &errOutput, func(s string) { glog.Info(s) })

	err := builder.docker.RunContainer(opts)
	if e, ok := err.(s2ierr.ContainerError); ok {
		// Must wait for StreamContainerIO goroutine above to exit before reading errOutput.
		<-c
		err = s2ierr.NewContainerError(config.BuilderImage, e.ErrorCode, errOutput)
	}

	return err
}
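The injection hand-off in this version is a single unbuffered error channel: a send delivers a real error, while a bare close makes the receive yield nil, which is how the source-upload goroutine decides whether to proceed. A minimal runnable sketch of the idiom:

package main

import (
	"errors"
	"fmt"
)

func signal(fail bool) <-chan error {
	ch := make(chan error)
	go func() {
		defer close(ch) // a bare close yields a nil receive on the other side
		if fail {
			ch <- errors.New("injection failed") // a send delivers the real error
		}
	}()
	return ch
}

func main() {
	fmt.Println(<-signal(false)) // <nil>            -> source upload proceeds
	fmt.Println(<-signal(true))  // injection failed -> upload is skipped
}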
Example #14
// Save extracts and restores the build artifacts from the previous build
// into the current build.
func (builder *STI) Save(config *api.Config) (err error) {
	artifactTmpDir := filepath.Join(config.WorkingDir, "upload", "artifacts")
	if builder.result == nil {
		builder.result = &api.Result{}
	}

	if err = builder.fs.Mkdir(artifactTmpDir); err != nil {
		builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonFSOperationFailed,
			utilstatus.ReasonMessageFSOperationFailed,
		)
		return err
	}

	image := firstNonEmpty(config.IncrementalFromTag, config.Tag)

	outReader, outWriter := io.Pipe()
	errReader, errWriter := io.Pipe()
	glog.V(1).Infof("Saving build artifacts from image %s to path %s", image, artifactTmpDir)
	extractFunc := func(string) error {
		extractErr := builder.tar.ExtractTarStream(artifactTmpDir, outReader)
		io.Copy(ioutil.Discard, outReader) // must ensure reader from container is drained
		return extractErr
	}

	user := config.AssembleUser
	if len(user) == 0 {
		user, err = builder.docker.GetImageUser(image)
		if err != nil {
			builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
				utilstatus.ReasonGenericS2IBuildFailed,
				utilstatus.ReasonMessageGenericS2iBuildFailed,
			)
			return err
		}
		glog.V(3).Infof("The assemble user is not set, defaulting to %q user", user)
	} else {
		glog.V(3).Infof("Using assemble user %q to extract artifacts", user)
	}

	opts := dockerpkg.RunContainerOptions{
		Image:           image,
		User:            user,
		ExternalScripts: builder.externalScripts[api.SaveArtifacts],
		ScriptsURL:      config.ScriptsURL,
		Destination:     config.Destination,
		PullImage:       false,
		Command:         api.SaveArtifacts,
		Stdout:          outWriter,
		Stderr:          errWriter,
		OnStart:         extractFunc,
		NetworkMode:     string(config.DockerNetworkMode),
		CGroupLimits:    config.CGroupLimits,
		CapDrop:         config.DropCapabilities,
	}

	dockerpkg.StreamContainerIO(errReader, nil, func(s string) { glog.Info(s) })
	err = builder.docker.RunContainer(opts)
	if e, ok := err.(s2ierr.ContainerError); ok {
		err = s2ierr.NewSaveArtifactsError(image, e.Output, err)
	}

	// Record a generic failure reason only when the save actually failed.
	if err != nil {
		builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(
			utilstatus.ReasonGenericS2IBuildFailed,
			utilstatus.ReasonMessageGenericS2iBuildFailed,
		)
	}
	return err
}