// LocalPath returns the local path of the source repository func (r *SourceRepository) LocalPath() (string, error) { if len(r.localDir) > 0 { return r.localDir, nil } switch { case r.url.Scheme == "file": r.localDir = filepath.Join(r.url.Path, r.contextDir) default: gitRepo := git.NewRepository() var err error if r.localDir, err = ioutil.TempDir("", "gen"); err != nil { return "", err } localURL := r.url ref := localURL.Fragment localURL.Fragment = "" if err = gitRepo.Clone(r.localDir, localURL.String()); err != nil { return "", fmt.Errorf("cannot clone repository %s: %v", localURL.String(), err) } if len(ref) > 0 { if err = gitRepo.Checkout(r.localDir, ref); err != nil { return "", fmt.Errorf("cannot checkout ref %s of repository %s: %v", ref, localURL.String(), err) } } r.localDir = filepath.Join(r.localDir, r.contextDir) } return r.localDir, nil }
// RemoteURL returns the remote URL of the source repository func (r *SourceRepository) RemoteURL() (*url.URL, bool, error) { if r.remoteURL != nil { return r.remoteURL, true, nil } switch r.url.Scheme { case "file": gitRepo := git.NewRepository() remote, ok, err := gitRepo.GetOriginURL(r.url.Path) if err != nil && err != git.ErrGitNotAvailable { return nil, false, err } if !ok { return nil, ok, nil } ref := gitRepo.GetRef(r.url.Path) if len(ref) > 0 { remote = fmt.Sprintf("%s#%s", remote, ref) } if r.remoteURL, err = git.ParseRepository(remote); err != nil { return nil, false, err } default: r.remoteURL = &r.url } return r.remoteURL, true, nil }
// NewBuildStrategyRefGenerator creates a BuildStrategyRefGenerator func NewBuildStrategyRefGenerator(sourceDetectors source.Detectors) *BuildStrategyRefGenerator { return &BuildStrategyRefGenerator{ gitRepository: git.NewRepository(), dockerfileFinder: dockerfile.NewFinder(), sourceDetectors: sourceDetectors, imageRefGenerator: NewImageRefGenerator(), } }
// IsRemoteRepository checks whether the provided string is a remote repository or not func IsRemoteRepository(s string) bool { if !s2igit.New().ValidCloneSpecRemoteOnly(s) { return false } gitRepo := git.NewRepository() if err := gitRepo.ListRemote(s); err != nil { return false } return true }
// IsRemoteRepository checks whether the provided string is a remote repository or not func IsRemoteRepository(s string) bool { if !s2igit.New(s2iutil.NewFileSystem()).ValidCloneSpecRemoteOnly(s) { return false } url, err := url.Parse(s) if err != nil { return false } url.Fragment = "" gitRepo := git.NewRepository() if _, _, err := gitRepo.ListRemote(url.String()); err != nil { return false } return true }
// newTestS2IBuilder creates a mock implementation of S2IBuilder, instrumenting
// different parts to return specific errors according to config.
func newTestS2IBuilder(config testS2IBuilderConfig) *S2IBuilder {
	return newS2IBuilder(
		// Fake Docker client that can be told to fail image pushes.
		&FakeDocker{
			errPushImage: config.errPushImage,
		},
		"/docker.socket",
		// Fake build client scoped to the empty namespace.
		testclient.NewSimpleFake().Builds(""),
		makeBuild(),
		git.NewRepository(),
		// Factory whose strategy lookup / build steps fail per config.
		testStiBuilderFactory{
			getStrategyErr: config.getStrategyErr,
			buildError:     config.buildError,
		},
		runtimeConfigValidator{},
	)
}
// creates mock implementation of STI builder, instrumenting different parts of a process to return errors
func makeStiBuilder(
	errPushImage error,
	getStrategyErr error,
	buildError error,
	validationErrors []validation.ValidationError) S2IBuilder {
	return *newS2IBuilder(
		// Fake Docker client that can be told to fail image pushes.
		testDockerClient{
			errPushImage: errPushImage,
		},
		"/docker.socket",
		// Fake build client scoped to the empty namespace.
		testclient.NewSimpleFake().Builds(""),
		makeBuild(),
		git.NewRepository(),
		testStiBuilderFactory{getStrategyErr: getStrategyErr, buildError: buildError},
		// Validator that reports the provided validation errors.
		testStiConfigValidator{errors: validationErrors},
	)
}
// LocalPath returns the local path of the source repository func (r *SourceRepository) LocalPath() (string, error) { if len(r.localDir) > 0 { return r.localDir, nil } switch { case r.url.Scheme == "file": r.localDir = filepath.Join(r.url.Path, r.contextDir) default: gitRepo := git.NewRepository() var err error if r.localDir, err = ioutil.TempDir("", "gen"); err != nil { return "", err } localURL, ref := cloneURLAndRef(&r.url) r.localDir, err = CloneAndCheckoutSources(gitRepo, localURL.String(), ref, r.localDir, r.contextDir) if err != nil { return "", err } } return r.localDir, nil }
// RemoteURL returns the remote URL of the source repository func (r *SourceRepository) RemoteURL() (*url.URL, error) { if r.remoteURL != nil { return r.remoteURL, nil } switch r.url.Scheme { case "file": gitRepo := git.NewRepository() remote, _, err := gitRepo.GetOriginURL(r.url.Path) if err != nil { return nil, err } ref := gitRepo.GetRef(r.url.Path) if len(ref) > 0 { remote = fmt.Sprintf("%s#%s", remote, ref) } if r.remoteURL, err = url.Parse(remote); err != nil { return nil, err } default: r.remoteURL = &r.url } return r.remoteURL, nil }
// RunStartBuild contains all the necessary functionality for the OpenShift cli start-build command
func RunStartBuild(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []string, webhooks util.StringFlag) error {
	webhook := cmdutil.GetFlagString(cmd, "from-webhook")
	buildName := cmdutil.GetFlagString(cmd, "from-build")
	follow := cmdutil.GetFlagBool(cmd, "follow")
	switch {
	case len(webhook) > 0:
		// Webhook mode: simulate a webhook delivery instead of starting a build directly.
		if len(args) > 0 || len(buildName) > 0 {
			return cmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments or '--from-build'")
		}
		path := cmdutil.GetFlagString(cmd, "git-repository")
		postReceivePath := cmdutil.GetFlagString(cmd, "git-post-receive")
		repo := git.NewRepository()
		return RunStartBuildWebHook(f, out, webhook, path, postReceivePath, repo)
	case len(args) != 1 && len(buildName) == 0:
		return cmdutil.UsageError(cmd, "Must pass a name of a BuildConfig or specify build name with '--from-build' flag")
	}
	// Resolve the target: --from-build re-runs an existing Build, otherwise the
	// positional argument names a BuildConfig.
	name := buildName
	isBuild := true
	if len(name) == 0 {
		name = args[0]
		isBuild = false
	}
	// List webhooks for the target instead of starting a build, when requested.
	if webhooks.Provided() {
		return RunListBuildWebHooks(f, out, cmd.Out(), name, isBuild, webhooks.String())
	}
	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	request := &buildapi.BuildRequest{
		ObjectMeta: kapi.ObjectMeta{Name: name},
	}
	// Either clone an existing build or instantiate from the build config.
	var newBuild *buildapi.Build
	if isBuild {
		if newBuild, err = client.Builds(namespace).Clone(request); err != nil {
			return err
		}
	} else {
		if newBuild, err = client.BuildConfigs(namespace).Instantiate(request); err != nil {
			return err
		}
	}
	// Print the new build's name so callers can script against it.
	fmt.Fprintf(out, "%s\n", newBuild.Name)
	// Optionally stream the build log until it finishes.
	if follow {
		opts := buildapi.BuildLogOptions{
			Follow: true,
			NoWait: false,
		}
		rd, err := client.BuildLogs(namespace).Get(newBuild.Name, opts).Stream()
		if err != nil {
			return fmt.Errorf("error getting logs: %v", err)
		}
		defer rd.Close()
		_, err = io.Copy(out, rd)
		if err != nil {
			return fmt.Errorf("error streaming logs: %v", err)
		}
	}
	return nil
}
// RunStartBuild contains all the necessary functionality for the OpenShift cli start-build command
func RunStartBuild(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, args []string, webhooks util.StringFlag) error {
	webhook := cmdutil.GetFlagString(cmd, "from-webhook")
	buildName := cmdutil.GetFlagString(cmd, "from-build")
	follow := cmdutil.GetFlagBool(cmd, "follow")
	commit := cmdutil.GetFlagString(cmd, "commit")
	waitForComplete := cmdutil.GetFlagBool(cmd, "wait")
	fromFile := cmdutil.GetFlagString(cmd, "from-file")
	fromDir := cmdutil.GetFlagString(cmd, "from-dir")
	fromRepo := cmdutil.GetFlagString(cmd, "from-repo")
	//shortOutput := false
	//mapper, _ := f.Object()
	switch {
	case len(webhook) > 0:
		// Webhook mode: simulate a webhook delivery instead of starting a build directly.
		if len(args) > 0 || len(buildName) > 0 || len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0 {
			return cmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments and all '--from-*' flags")
		}
		path := cmdutil.GetFlagString(cmd, "git-repository")
		postReceivePath := cmdutil.GetFlagString(cmd, "git-post-receive")
		repo := git.NewRepository()
		return RunStartBuildWebHook(f, out, webhook, path, postReceivePath, repo)
	case len(args) != 1 && len(buildName) == 0:
		return cmdutil.UsageError(cmd, "Must pass a name of a build config or specify build name with '--from-build' flag")
	}
	// Resolve the target: --from-build re-runs an existing Build, otherwise the
	// positional argument names a BuildConfig.
	name := buildName
	isBuild := true
	if len(name) == 0 {
		name = args[0]
		isBuild = false
	}
	// List webhooks for the target instead of starting a build, when requested.
	if webhooks.Provided() {
		return RunListBuildWebHooks(f, out, cmd.Out(), name, isBuild, webhooks.String())
	}
	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	request := &buildapi.BuildRequest{
		ObjectMeta: kapi.ObjectMeta{Name: name},
	}
	// An explicit --commit pins the source revision to build.
	if len(commit) > 0 {
		request.Revision = &buildapi.SourceRevision{
			Type: buildapi.BuildSourceGit,
			Git: &buildapi.GitSourceRevision{
				Commit: commit,
			},
		}
	}
	git := git.NewRepository()
	var newBuild *buildapi.Build
	switch {
	// Binary build: upload local file/dir/repo content as the build source.
	case !isBuild && (len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0):
		request := &buildapi.BinaryBuildRequestOptions{
			ObjectMeta: kapi.ObjectMeta{
				Name:      name,
				Namespace: namespace,
			},
			Commit: commit,
		}
		if newBuild, err = streamPathToBuild(git, in, cmd.Out(), client.BuildConfigs(namespace), fromDir, fromFile, fromRepo, request); err != nil {
			return err
		}
	case isBuild:
		if newBuild, err = client.Builds(namespace).Clone(request); err != nil {
			return err
		}
	default:
		if newBuild, err = client.BuildConfigs(namespace).Instantiate(request); err != nil {
			return err
		}
	}
	//cmdutil.PrintSuccess(mapper, shortOutput, out, "Build", newBuild.Name, "started")
	fmt.Fprintln(out, newBuild.Name)
	// Completion-wait and log-follow run concurrently; exitErr is only written
	// by the wait goroutine and read after wg.Wait().
	var (
		wg      sync.WaitGroup
		exitErr error
	)
	// Wait for the build to complete
	if waitForComplete {
		wg.Add(1)
		go func() {
			defer wg.Done()
			exitErr = WaitForBuildComplete(client.Builds(namespace), newBuild.Name)
		}()
	}
	// Stream the logs from the build
	if follow {
		wg.Add(1)
		go func() {
			defer wg.Done()
			opts := buildapi.BuildLogOptions{
				Follow: true,
				NoWait: false,
			}
			rd, err := client.BuildLogs(namespace).Get(newBuild.Name, opts).Stream()
			if err != nil {
				// Log-streaming problems are reported but do not fail the command.
				fmt.Fprintf(cmd.Out(), "error getting logs: %v\n", err)
				return
			}
			defer rd.Close()
			if _, err = io.Copy(out, rd); err != nil {
				fmt.Fprintf(cmd.Out(), "error streaming logs: %v\n", err)
			}
		}()
	}
	wg.Wait()
	return exitErr
}
// TestDockerfilePath validates that we can use a Dockefile with a custom name, and in a sub-directory
func TestDockerfilePath(t *testing.T) {
	tests := []struct {
		contextDir     string
		dockerfilePath string
		dockerStrategy *api.DockerBuildStrategy
	}{
		// default Dockerfile path
		{
			dockerfilePath: "Dockerfile",
			dockerStrategy: &api.DockerBuildStrategy{},
		},
		// custom Dockerfile path in the root context
		{
			dockerfilePath: "mydockerfile",
			dockerStrategy: &api.DockerBuildStrategy{
				DockerfilePath: "mydockerfile",
			},
		},
		// custom Dockerfile path in a sub directory
		{
			dockerfilePath: "dockerfiles/mydockerfile",
			dockerStrategy: &api.DockerBuildStrategy{
				DockerfilePath: "dockerfiles/mydockerfile",
			},
		},
		// custom Dockerfile path in a sub directory
		// with a contextDir
		{
			contextDir:     "somedir",
			dockerfilePath: "dockerfiles/mydockerfile",
			dockerStrategy: &api.DockerBuildStrategy{
				DockerfilePath: "dockerfiles/mydockerfile",
			},
		},
	}
	for _, test := range tests {
		// Each case writes a real Dockerfile into a temp build directory.
		buildDir, err := ioutil.TempDir("", "dockerfile-path")
		if err != nil {
			t.Errorf("failed to create tmpdir: %v", err)
			continue
		}
		absoluteDockerfilePath := filepath.Join(buildDir, test.contextDir, test.dockerfilePath)
		dockerfileContent := "FROM openshift/origin-base"
		if err = os.MkdirAll(filepath.Dir(absoluteDockerfilePath), os.FileMode(0750)); err != nil {
			t.Errorf("failed to create directory %s: %v", filepath.Dir(absoluteDockerfilePath), err)
			continue
		}
		if err = ioutil.WriteFile(absoluteDockerfilePath, []byte(dockerfileContent), os.FileMode(0644)); err != nil {
			t.Errorf("failed to write dockerfile to %s: %v", absoluteDockerfilePath, err)
			continue
		}
		// Build object wired to the per-case Docker strategy and context dir.
		build := &api.Build{
			Spec: api.BuildSpec{
				Source: api.BuildSource{
					Git: &api.GitBuildSource{
						URI: "http://github.com/openshift/origin.git",
					},
					ContextDir: test.contextDir,
				},
				Strategy: api.BuildStrategy{
					DockerStrategy: test.dockerStrategy,
				},
				Output: api.BuildOutput{
					To: &kapi.ObjectReference{
						Kind: "DockerImage",
						Name: "test/test-result:latest",
					},
				},
			},
		}
		// The fake Docker client asserts that the build is invoked with the
		// expected Dockerfile path.
		dockerClient := &FakeDocker{
			buildImageFunc: func(opts docker.BuildImageOptions) error {
				if opts.Dockerfile != test.dockerfilePath {
					t.Errorf("Unexpected dockerfile path: %s (expected: %s)", opts.Dockerfile, test.dockerfilePath)
				}
				return nil
			},
		}
		dockerBuilder := &DockerBuilder{
			dockerClient: dockerClient,
			build:        build,
			gitClient:    git.NewRepository(),
			tar:          tar.New(),
		}
		// this will validate that the Dockerfile is readable
		// and append some labels to the Dockerfile
		if err = dockerBuilder.addBuildParameters(buildDir); err != nil {
			t.Errorf("failed to add build parameters: %v", err)
			continue
		}
		// check that our Dockerfile has been modified
		dockerfileData, err := ioutil.ReadFile(absoluteDockerfilePath)
		if err != nil {
			t.Errorf("failed to read dockerfile %s: %v", absoluteDockerfilePath, err)
			continue
		}
		if !strings.Contains(string(dockerfileData), dockerfileContent) {
			t.Errorf("Updated Dockerfile content does not contains the original Dockerfile content.\n\nOriginal content:\n%s\n\nUpdated content:\n%s\n", dockerfileContent, string(dockerfileData))
			continue
		}
		// check that the docker client is called with the right Dockerfile parameter
		if err = dockerBuilder.dockerBuild(buildDir); err != nil {
			t.Errorf("failed to build: %v", err)
			continue
		}
	}
}
// Generate accepts a set of Docker compose project paths and converts them in an
// OpenShift template definition.
func Generate(paths ...string) (*templateapi.Template, error) {
	// Normalize all compose file paths to absolute paths.
	for i := range paths {
		path, err := filepath.Abs(paths[i])
		if err != nil {
			return nil, err
		}
		paths[i] = path
	}
	// bases are the directories containing each compose file; build paths must
	// fall under one of them.
	var bases []string
	for _, s := range paths {
		bases = append(bases, filepath.Dir(s))
	}
	context := &project.Context{
		ComposeFiles: paths,
	}
	p := project.NewProject(context)
	if err := p.Parse(); err != nil {
		return nil, err
	}
	template := &templateapi.Template{}
	template.Name = p.Name
	// Collect service names (sorted via the string set) and record warnings for
	// compose elements this generator cannot honor.
	serviceOrder := sets.NewString()
	warnings := make(map[string][]string)
	for k, v := range p.Configs {
		serviceOrder.Insert(k)
		warnUnusableComposeElements(k, v, warnings)
	}
	g := app.NewImageRefGenerator()
	var errs []error
	var pipelines app.PipelineGroup
	builds := make(map[string]*app.Pipeline)
	// identify colocated components due to shared volumes
	joins := make(map[string]sets.String)
	volumesFrom := make(map[string][]string)
	for _, k := range serviceOrder.List() {
		if joins[k] == nil {
			joins[k] = sets.NewString(k)
		}
		v := p.Configs[k]
		for _, from := range v.VolumesFrom {
			// volumes_from entries may be "svc", "svc:mode", or "type:svc:mode".
			switch parts := strings.Split(from, ":"); len(parts) {
			case 1:
				joins[k].Insert(parts[0])
				volumesFrom[k] = append(volumesFrom[k], parts[0])
			case 2:
				target := parts[1]
				if parts[1] == "ro" || parts[1] == "rw" {
					target = parts[0]
				}
				joins[k].Insert(target)
				volumesFrom[k] = append(volumesFrom[k], target)
			case 3:
				joins[k].Insert(parts[1])
				volumesFrom[k] = append(volumesFrom[k], parts[1])
			}
		}
	}
	joinOrder := sets.NewString()
	for k := range joins {
		joinOrder.Insert(k)
	}
	// Merge overlapping join sets into colocated groups; a set overlapping two
	// distinct groups is ambiguous and rejected.
	var colocated []sets.String
	for _, k := range joinOrder.List() {
		set := joins[k]
		matched := -1
		for i, existing := range colocated {
			if set.Intersection(existing).Len() == 0 {
				continue
			}
			if matched != -1 {
				return nil, fmt.Errorf("%q belongs with %v, but %v also contains some overlapping elements", k, set, colocated[matched])
			}
			existing.Insert(set.List()...)
			matched = i
			continue
		}
		if matched == -1 {
			colocated = append(colocated, set)
		}
	}
	// identify service aliases
	aliases := make(map[string]sets.String)
	for _, v := range p.Configs {
		for _, s := range v.Links.Slice() {
			parts := strings.SplitN(s, ":", 2)
			if len(parts) != 2 || parts[0] == parts[1] {
				continue
			}
			set := aliases[parts[0]]
			if set == nil {
				set = sets.NewString()
				aliases[parts[0]] = set
			}
			set.Insert(parts[1])
		}
	}
	// find and define build pipelines
	for _, k := range serviceOrder.List() {
		v := p.Configs[k]
		if len(v.Build) == 0 {
			continue
		}
		if _, ok := builds[v.Build]; ok {
			continue
		}
		// Locate the compose-file base directory that contains the build path.
		var base, relative string
		for _, s := range bases {
			if !strings.HasPrefix(v.Build, s) {
				continue
			}
			base = s
			path, err := filepath.Rel(base, v.Build)
			if err != nil {
				return nil, fmt.Errorf("path is not relative to base: %v", err)
			}
			relative = path
			break
		}
		if len(base) == 0 {
			return nil, fmt.Errorf("build path outside of the compose file: %s", v.Build)
		}
		// if this is a Git repository, make the path relative
		if root, err := git.NewRepository().GetRootDir(base); err == nil {
			if relative, err = filepath.Rel(root, filepath.Join(base, relative)); err != nil {
				return nil, fmt.Errorf("unable to find relative path for Git repository: %v", err)
			}
			base = root
		}
		buildPath := filepath.Join(base, relative)
		// TODO: what if there is no origin for this repo?
		glog.V(4).Infof("compose service: %#v", v)
		repo, err := app.NewSourceRepositoryWithDockerfile(buildPath, "")
		if err != nil {
			errs = append(errs, err)
			continue
		}
		repo.BuildWithDocker()
		info := repo.Info()
		if info == nil || info.Dockerfile == nil {
			errs = append(errs, fmt.Errorf("unable to locate a Dockerfile in %s", v.Build))
			continue
		}
		// The base image comes from the last FROM in the Dockerfile.
		node := info.Dockerfile.AST()
		baseImage := dockerfileutil.LastBaseImage(node)
		if len(baseImage) == 0 {
			errs = append(errs, fmt.Errorf("the Dockerfile in the repository %q has no FROM instruction", info.Path))
			continue
		}
		var ports []string
		for _, s := range v.Ports {
			container, _ := extractFirstPorts(s)
			ports = append(ports, container)
		}
		image, err := g.FromNameAndPorts(baseImage, ports)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		image.AsImageStream = true
		image.TagDirectly = true
		image.ObjectName = k
		image.Tag = "from"
		pipeline, err := app.NewPipelineBuilder(k, nil, false).To(k).NewBuildPipeline(k, image, repo)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		if len(relative) > 0 {
			pipeline.Build.Source.ContextDir = relative
		}
		// TODO: this should not be necessary
		pipeline.Build.Source.Name = k
		pipeline.Name = k
		pipeline.Image.ObjectName = k
		glog.V(4).Infof("created pipeline %+v", pipeline)
		builds[v.Build] = pipeline
		pipelines = append(pipelines, pipeline)
	}
	if len(errs) > 0 {
		return nil, utilerrs.NewAggregate(errs)
	}
	// create deployment groups
	for _, pod := range colocated {
		var group app.PipelineGroup
		// commonMounts maps a volume source to its generated volume name, shared
		// across all containers of the colocated group.
		commonMounts := make(map[string]string)
		for _, k := range pod.List() {
			v := p.Configs[k]
			glog.V(4).Infof("compose service: %#v", v)
			// Resolve the input image: explicit image wins, otherwise reuse the
			// image produced by the service's build pipeline.
			var inputImage *app.ImageRef
			if len(v.Image) != 0 {
				image, err := g.FromName(v.Image)
				if err != nil {
					errs = append(errs, err)
					continue
				}
				image.AsImageStream = true
				image.TagDirectly = true
				image.ObjectName = k
				inputImage = image
			}
			if inputImage == nil {
				if previous, ok := builds[v.Build]; ok {
					inputImage = previous.Image
				}
			}
			if inputImage == nil {
				errs = append(errs, fmt.Errorf("could not find an input image for %q", k))
				continue
			}
			// ContainerFn translates the compose service settings onto the
			// generated container spec.
			inputImage.ContainerFn = func(c *kapi.Container) {
				if len(v.ContainerName) > 0 {
					c.Name = v.ContainerName
				}
				for _, s := range v.Ports {
					container, _ := extractFirstPorts(s)
					if port, err := strconv.Atoi(container); err == nil {
						c.Ports = append(c.Ports, kapi.ContainerPort{ContainerPort: port})
					}
				}
				c.Args = v.Command.Slice()
				if len(v.Entrypoint.Slice()) > 0 {
					c.Command = v.Entrypoint.Slice()
				}
				if len(v.WorkingDir) > 0 {
					c.WorkingDir = v.WorkingDir
				}
				c.Env = append(c.Env, app.ParseEnvironment(v.Environment.Slice()...).List()...)
				if uid, err := strconv.Atoi(v.User); err == nil {
					uid64 := int64(uid)
					if c.SecurityContext == nil {
						c.SecurityContext = &kapi.SecurityContext{}
					}
					c.SecurityContext.RunAsUser = &uid64
				}
				c.TTY = v.Tty
				if v.StdinOpen {
					c.StdinOnce = true
					c.Stdin = true
				}
				if v.Privileged {
					if c.SecurityContext == nil {
						c.SecurityContext = &kapi.SecurityContext{}
					}
					t := true
					c.SecurityContext.Privileged = &t
				}
				if v.ReadOnly {
					if c.SecurityContext == nil {
						c.SecurityContext = &kapi.SecurityContext{}
					}
					t := true
					c.SecurityContext.ReadOnlyRootFilesystem = &t
				}
				if v.MemLimit > 0 {
					q := resource.NewQuantity(v.MemLimit, resource.DecimalSI)
					if c.Resources.Limits == nil {
						c.Resources.Limits = make(kapi.ResourceList)
					}
					c.Resources.Limits[kapi.ResourceMemory] = *q
				}
				// Convert Docker CPU quota/shares into Kubernetes milliCPU.
				if quota := v.CPUQuota; quota > 0 {
					if quota < 1000 {
						quota = 1000 // minQuotaPeriod
					}
					milliCPU := quota * 1000     // milliCPUtoCPU
					milliCPU = milliCPU / 100000 // quotaPeriod
					q := resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
					if c.Resources.Limits == nil {
						c.Resources.Limits = make(kapi.ResourceList)
					}
					c.Resources.Limits[kapi.ResourceCPU] = *q
				}
				if shares := v.CPUShares; shares > 0 {
					if shares < 2 {
						shares = 2 // minShares
					}
					milliCPU := shares * 1000  // milliCPUtoCPU
					milliCPU = milliCPU / 1024 // sharesPerCPU
					q := resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
					if c.Resources.Requests == nil {
						c.Resources.Requests = make(kapi.ResourceList)
					}
					c.Resources.Requests[kapi.ResourceCPU] = *q
				}
				// Volume entries may be "path" or "source:path[:mode]".
				mountPoints := make(map[string][]string)
				for _, s := range v.Volumes {
					switch parts := strings.SplitN(s, ":", 3); len(parts) {
					case 1:
						mountPoints[""] = append(mountPoints[""], parts[0])
					case 2:
						fallthrough
					default:
						mountPoints[parts[0]] = append(mountPoints[parts[0]], parts[1])
					}
				}
				for from, at := range mountPoints {
					name, ok := commonMounts[from]
					if !ok {
						name = fmt.Sprintf("dir-%d", len(commonMounts)+1)
						commonMounts[from] = name
					}
					for _, path := range at {
						c.VolumeMounts = append(c.VolumeMounts, kapi.VolumeMount{Name: name, MountPath: path})
					}
				}
			}
			pipeline, err := app.NewPipelineBuilder(k, nil, true).To(k).NewImagePipeline(k, inputImage)
			if err != nil {
				errs = append(errs, err)
				break
			}
			if err := pipeline.NeedsDeployment(nil, nil, false); err != nil {
				return nil, err
			}
			group = append(group, pipeline)
		}
		if err := group.Reduce(); err != nil {
			return nil, err
		}
		pipelines = append(pipelines, group...)
	}
	if len(errs) > 0 {
		return nil, utilerrs.NewAggregate(errs)
	}
	// Materialize every pipeline into API objects.
	acceptors := app.Acceptors{app.NewAcceptUnique(kapi.Scheme), app.AcceptNew}
	objects := app.Objects{}
	accept := app.NewAcceptFirst()
	for _, p := range pipelines {
		accepted, err := p.Objects(accept, acceptors)
		if err != nil {
			return nil, fmt.Errorf("can't setup %q: %v", p.From, err)
		}
		objects = append(objects, accepted...)
	}
	// create services for each object with a name based on alias.
	containers := make(map[string]*kapi.Container)
	var services []*kapi.Service
	for _, obj := range objects {
		switch t := obj.(type) {
		case *deployapi.DeploymentConfig:
			ports := app.UniqueContainerToServicePorts(app.AllContainerPorts(t.Spec.Template.Spec.Containers...))
			if len(ports) == 0 {
				continue
			}
			svc := app.GenerateService(t.ObjectMeta, t.Spec.Selector)
			// A single alias renames the service to that alias.
			if aliases[svc.Name].Len() == 1 {
				svc.Name = aliases[svc.Name].List()[0]
			}
			svc.Spec.Ports = ports
			services = append(services, svc)
			// take a reference to each container
			for i := range t.Spec.Template.Spec.Containers {
				c := &t.Spec.Template.Spec.Containers[i]
				containers[c.Name] = c
			}
		}
	}
	for _, svc := range services {
		objects = append(objects, svc)
	}
	// for each container that defines VolumesFrom, copy equivalent mounts.
	// TODO: ensure mount names are unique?
	for target, otherContainers := range volumesFrom {
		for _, from := range otherContainers {
			for _, volume := range containers[from].VolumeMounts {
				containers[target].VolumeMounts = append(containers[target].VolumeMounts, volume)
			}
		}
	}
	template.Objects = objects
	// generate warnings
	if len(warnings) > 0 {
		allWarnings := sets.NewString()
		for msg, services := range warnings {
			allWarnings.Insert(fmt.Sprintf("%s: %s", strings.Join(services, ","), msg))
		}
		if template.Annotations == nil {
			template.Annotations = make(map[string]string)
		}
		template.Annotations[app.GenerationWarningAnnotation] = fmt.Sprintf("not all docker-compose fields were honored:\n* %s", strings.Join(allWarnings.List(), "\n* "))
	}
	return template, nil
}
// NewSourceRefGenerator creates a new SourceRefGenerator func NewSourceRefGenerator() *SourceRefGenerator { return &SourceRefGenerator{ repository: git.NewRepository(), } }
// Complete resolves the start-build options from flags and arguments, filling in
// clients, namespace, target name, and environment before Run is invoked.
func (o *StartBuildOptions) Complete(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, args []string) error {
	o.In = in
	o.Out = out
	o.ErrOut = cmd.Out()
	o.Git = git.NewRepository()
	o.ClientConfig = f.OpenShiftClientConfig
	webhook := o.FromWebhook
	buildName := o.FromBuild
	fromFile := o.FromFile
	fromDir := o.FromDir
	fromRepo := o.FromRepo
	buildLogLevel := o.LogLevel
	switch {
	case len(webhook) > 0:
		// Webhook mode needs no further resolution.
		if len(args) > 0 || len(buildName) > 0 || len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0 {
			return kcmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments and all '--from-*' flags")
		}
		return nil
	case len(args) != 1 && len(buildName) == 0:
		return kcmdutil.UsageError(cmd, "Must pass a name of a build config or specify build name with '--from-build' flag")
	}
	// Any of the --from-file/dir/repo flags selects a binary (upload) build.
	o.AsBinary = len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	o.Client = client
	var (
		name     = buildName
		resource = buildapi.Resource("builds")
	)
	// Resolve the positional argument to a buildconfig (or build, for webhook
	// listing only).
	if len(name) == 0 && len(args) > 0 && len(args[0]) > 0 {
		mapper, _ := f.Object(false)
		resource, name, err = cmdutil.ResolveResource(buildapi.Resource("buildconfigs"), args[0], mapper)
		if err != nil {
			return err
		}
		switch resource {
		case buildapi.Resource("buildconfigs"):
			// no special handling required
		case buildapi.Resource("builds"):
			if len(o.ListWebhooks) == 0 {
				return fmt.Errorf("use --from-build to rerun your builds")
			}
		default:
			return fmt.Errorf("invalid resource provided: %v", resource)
		}
	}
	// when listing webhooks, allow --from-build to lookup a build config
	if resource == buildapi.Resource("builds") && len(o.ListWebhooks) > 0 {
		build, err := client.Builds(namespace).Get(name)
		if err != nil {
			return err
		}
		ref := build.Status.Config
		if ref == nil {
			return fmt.Errorf("the provided Build %q was not created from a BuildConfig and cannot have webhooks", name)
		}
		if len(ref.Namespace) > 0 {
			namespace = ref.Namespace
		}
		name = ref.Name
	}
	if len(name) == 0 {
		return fmt.Errorf("a resource name is required either as an argument or by using --from-build")
	}
	o.Namespace = namespace
	o.Name = name
	// Parse --env values (possibly reading from stdin) and propagate the build
	// log level as an environment variable.
	env, _, err := cmdutil.ParseEnv(o.Env, in)
	if err != nil {
		return err
	}
	if len(buildLogLevel) > 0 {
		env = append(env, kapi.EnvVar{Name: "BUILD_LOGLEVEL", Value: buildLogLevel})
	}
	o.EnvVar = env
	return nil
}
// RunStartBuild contains all the necessary functionality for the OpenShift cli start-build command
func RunStartBuild(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, envParams []string, args []string, webhooks util.StringFlag) error {
	webhook := cmdutil.GetFlagString(cmd, "from-webhook")
	buildName := cmdutil.GetFlagString(cmd, "from-build")
	follow := cmdutil.GetFlagBool(cmd, "follow")
	commit := cmdutil.GetFlagString(cmd, "commit")
	waitForComplete := cmdutil.GetFlagBool(cmd, "wait")
	fromFile := cmdutil.GetFlagString(cmd, "from-file")
	fromDir := cmdutil.GetFlagString(cmd, "from-dir")
	fromRepo := cmdutil.GetFlagString(cmd, "from-repo")
	buildLogLevel := cmdutil.GetFlagString(cmd, "build-loglevel")
	switch {
	case len(webhook) > 0:
		// Webhook mode: simulate a webhook delivery instead of starting a build directly.
		if len(args) > 0 || len(buildName) > 0 || len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0 {
			return cmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments and all '--from-*' flags")
		}
		path := cmdutil.GetFlagString(cmd, "git-repository")
		postReceivePath := cmdutil.GetFlagString(cmd, "git-post-receive")
		repo := git.NewRepository()
		return RunStartBuildWebHook(f, out, webhook, path, postReceivePath, repo)
	case len(args) != 1 && len(buildName) == 0:
		return cmdutil.UsageError(cmd, "Must pass a name of a build config or specify build name with '--from-build' flag")
	}
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	var (
		name     = buildName
		resource = "builds"
	)
	// Resolve the positional argument to a buildconfig; a plain build can only
	// be re-run via --from-build.
	if len(name) == 0 && len(args) > 0 && len(args[0]) > 0 {
		mapper, _ := f.Object()
		resource, name, err = osutil.ResolveResource("buildconfigs", args[0], mapper)
		if err != nil {
			return err
		}
		switch resource {
		case "buildconfigs":
			// no special handling required
		case "builds":
			return fmt.Errorf("use --from-build to rerun your builds")
		default:
			return fmt.Errorf("invalid resource provided: %s", resource)
		}
	}
	if len(name) == 0 {
		return fmt.Errorf("a resource name is required either as an argument or by using --from-build")
	}
	// List webhooks for the target instead of starting a build, when requested.
	if webhooks.Provided() {
		return RunListBuildWebHooks(f, out, cmd.Out(), name, resource, webhooks.String())
	}
	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	// Parse --env values (possibly reading from stdin) and propagate the build
	// log level as an environment variable.
	env, _, err := ParseEnv(envParams, in)
	if err != nil {
		return err
	}
	if len(buildLogLevel) > 0 {
		env = append(env, kapi.EnvVar{Name: "BUILD_LOGLEVEL", Value: buildLogLevel})
	}
	request := &buildapi.BuildRequest{
		ObjectMeta: kapi.ObjectMeta{Name: name},
	}
	if len(env) > 0 {
		request.Env = env
	}
	// An explicit --commit pins the source revision to build.
	if len(commit) > 0 {
		request.Revision = &buildapi.SourceRevision{
			Type: buildapi.BuildSourceGit,
			Git: &buildapi.GitSourceRevision{
				Commit: commit,
			},
		}
	}
	git := git.NewRepository()
	var newBuild *buildapi.Build
	switch {
	// Binary build: upload local file/dir/repo content as the build source.
	case len(args) > 0 && (len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0):
		request := &buildapi.BinaryBuildRequestOptions{
			ObjectMeta: kapi.ObjectMeta{
				Name:      name,
				Namespace: namespace,
			},
			Commit: commit,
		}
		if newBuild, err = streamPathToBuild(git, in, cmd.Out(), client.BuildConfigs(namespace), fromDir, fromFile, fromRepo, request); err != nil {
			return err
		}
	case resource == "builds":
		if newBuild, err = client.Builds(namespace).Clone(request); err != nil {
			return err
		}
	case resource == "buildconfigs":
		if newBuild, err = client.BuildConfigs(namespace).Instantiate(request); err != nil {
			return err
		}
	default:
		return fmt.Errorf("invalid resource provided: %s", resource)
	}
	// Print the new build's name so callers can script against it.
	fmt.Fprintln(out, newBuild.Name)
	// mapper, typer := f.Object()
	// resourceMapper := &resource.Mapper{ObjectTyper: typer, RESTMapper: mapper, ClientMapper: f.ClientMapperForCommand()}
	// info, err := resourceMapper.InfoForObject(newBuild)
	// if err != nil {
	// return err
	// }
	// shortOutput := cmdutil.GetFlagString(cmd, "output") == "name"
	// cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "started")
	// Completion-wait and log-follow run concurrently; exitErr is only written
	// by the wait goroutine and read after wg.Wait().
	var (
		wg      sync.WaitGroup
		exitErr error
	)
	// Wait for the build to complete
	if waitForComplete {
		wg.Add(1)
		go func() {
			defer wg.Done()
			exitErr = WaitForBuildComplete(client.Builds(namespace), newBuild.Name)
		}()
	}
	// Stream the logs from the build
	if follow {
		wg.Add(1)
		go func() {
			defer wg.Done()
			opts := buildapi.BuildLogOptions{
				Follow: true,
				NoWait: false,
			}
			rd, err := client.BuildLogs(namespace).Get(newBuild.Name, opts).Stream()
			if err != nil {
				// Log-streaming problems are reported but do not fail the command.
				fmt.Fprintf(cmd.Out(), "error getting logs: %v\n", err)
				return
			}
			defer rd.Close()
			if _, err = io.Copy(out, rd); err != nil {
				fmt.Fprintf(cmd.Out(), "error streaming logs: %v\n", err)
			}
		}()
	}
	wg.Wait()
	return exitErr
}
// Complete resolves StartBuildOptions from the factory, command flags, and
// positional arguments. It validates mutually-exclusive flag combinations,
// resolves the target resource name and namespace (build config, or build
// when listing webhooks), and parses environment variables for the build.
// Returns a usage error for invalid flag combinations, or any error from
// resolving the client, resource, or environment.
func (o *StartBuildOptions) Complete(f *clientcmd.Factory, in io.Reader, out io.Writer, cmd *cobra.Command, cmdFullName string, args []string) error {
	o.In = in
	o.Out = out
	o.ErrOut = cmd.OutOrStderr()
	o.Git = git.NewRepository()
	o.ClientConfig = f.OpenShiftClientConfig
	o.Mapper, _ = f.Object(false)

	webhook := o.FromWebhook
	buildName := o.FromBuild
	fromFile := o.FromFile
	fromDir := o.FromDir
	fromRepo := o.FromRepo
	buildLogLevel := o.LogLevel

	// Only "name" output (or the default empty format) is supported here.
	outputFormat := kcmdutil.GetFlagString(cmd, "output")
	if outputFormat != "name" && outputFormat != "" {
		return kcmdutil.UsageError(cmd, "Unsupported output format: %s", outputFormat)
	}
	o.ShortOutput = outputFormat == "name"

	switch {
	case len(webhook) > 0:
		// --from-webhook is exclusive: it cannot be combined with positional
		// arguments or any other --from-* flag. Nothing else to resolve.
		if len(args) > 0 || len(buildName) > 0 || len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0 {
			return kcmdutil.UsageError(cmd, "The '--from-webhook' flag is incompatible with arguments and all '--from-*' flags")
		}
		return nil
	case len(args) != 1 && len(buildName) == 0:
		// The target must come from exactly one positional argument or --from-build.
		return kcmdutil.UsageError(cmd, "Must pass a name of a build config or specify build name with '--from-build' flag.\nUse \"%s get bc\" to list all available build configs.", cmdFullName)
	}
	if len(buildName) != 0 && (len(fromFile) != 0 || len(fromDir) != 0 || len(fromRepo) != 0) {
		// TODO: we should support this, it should be possible to clone a build to run again with new uploaded artifacts.
		// Doing so requires introducing a new clonebinary endpoint.
		return kcmdutil.UsageError(cmd, "Cannot use '--from-build' flag with binary builds")
	}
	// A binary build streams its source from a local file, directory, or repo.
	o.AsBinary = len(fromFile) > 0 || len(fromDir) > 0 || len(fromRepo) > 0

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, _, err := f.Clients()
	if err != nil {
		return err
	}
	o.Client = client

	var (
		name     = buildName
		resource = buildapi.Resource("builds")
	)
	// Without --from-build, resolve the positional argument. It defaults to a
	// buildconfig, but the user may explicitly name a build resource.
	if len(name) == 0 && len(args) > 0 && len(args[0]) > 0 {
		mapper, _ := f.Object(false)
		resource, name, err = cmdutil.ResolveResource(buildapi.Resource("buildconfigs"), args[0], mapper)
		if err != nil {
			return err
		}
		switch resource {
		case buildapi.Resource("buildconfigs"):
			// no special handling required
		case buildapi.Resource("builds"):
			// Naming a build directly is only allowed when listing its webhooks.
			if len(o.ListWebhooks) == 0 {
				return fmt.Errorf("use --from-build to rerun your builds")
			}
		default:
			return fmt.Errorf("invalid resource provided: %v", resource)
		}
	}
	// when listing webhooks, allow --from-build to lookup a build config
	if resource == buildapi.Resource("builds") && len(o.ListWebhooks) > 0 {
		build, err := client.Builds(namespace).Get(name)
		if err != nil {
			return err
		}
		// Follow the build back to its originating BuildConfig; webhooks live there.
		ref := build.Status.Config
		if ref == nil {
			return fmt.Errorf("the provided Build %q was not created from a BuildConfig and cannot have webhooks", name)
		}
		if len(ref.Namespace) > 0 {
			namespace = ref.Namespace
		}
		name = ref.Name
	}

	if len(name) == 0 {
		return fmt.Errorf("a resource name is required either as an argument or by using --from-build")
	}

	o.Namespace = namespace
	o.Name = name

	// Parse --env entries (reading values from stdin where requested), then
	// force the build log level via the BUILD_LOGLEVEL variable if set.
	env, _, err := cmdutil.ParseEnv(o.Env, in)
	if err != nil {
		return err
	}
	if len(buildLogLevel) > 0 {
		env = append(env, kapi.EnvVar{Name: "BUILD_LOGLEVEL", Value: buildLogLevel})
	}
	o.EnvVar = env

	return nil
}
// TestDockerfilePath validates that we can use a Dockerfile with a custom name, and in a sub-directory.
// For each case it writes a minimal Dockerfile to a temp build dir, runs
// addBuildParameters to inject build env/label lines, verifies the rewritten
// Dockerfile contains the expected values, and checks that dockerBuild passes
// the correct Dockerfile path to the docker client.
func TestDockerfilePath(t *testing.T) {
	tests := []struct {
		contextDir     string // optional sub-directory used as the build context
		dockerfilePath string // path of the Dockerfile relative to the context
		dockerStrategy *api.DockerBuildStrategy
	}{
		// default Dockerfile path
		{
			dockerfilePath: "Dockerfile",
			dockerStrategy: &api.DockerBuildStrategy{},
		},
		// custom Dockerfile path in the root context
		{
			dockerfilePath: "mydockerfile",
			dockerStrategy: &api.DockerBuildStrategy{
				DockerfilePath: "mydockerfile",
			},
		},
		// custom Dockerfile path in a sub directory
		{
			dockerfilePath: "dockerfiles/mydockerfile",
			dockerStrategy: &api.DockerBuildStrategy{
				DockerfilePath: "dockerfiles/mydockerfile",
			},
		},
		// custom Dockerfile path in a sub directory
		// with a contextDir
		{
			contextDir:     "somedir",
			dockerfilePath: "dockerfiles/mydockerfile",
			dockerStrategy: &api.DockerBuildStrategy{
				DockerfilePath: "dockerfiles/mydockerfile",
			},
		},
	}

	from := "FROM openshift/origin-base"
	// Substrings that must appear in the Dockerfile after addBuildParameters
	// rewrites it (values correspond to the build/sourceInfo set up below).
	expected := []string{
		from,
		// expected env variables
		"\"OPENSHIFT_BUILD_NAME\"=\"name\"",
		"\"OPENSHIFT_BUILD_NAMESPACE\"=\"namespace\"",
		"\"OPENSHIFT_BUILD_SOURCE\"=\"http://github.com/openshift/origin.git\"",
		"\"OPENSHIFT_BUILD_COMMIT\"=\"commitid\"",
		// expected labels
		"\"io.openshift.build.commit.author\"=\"test user \\[email protected]\\u003e\"",
		"\"io.openshift.build.commit.date\"=\"date\"",
		"\"io.openshift.build.commit.id\"=\"commitid\"",
		"\"io.openshift.build.commit.ref\"=\"ref\"",
		"\"io.openshift.build.commit.message\"=\"message\"",
	}

	for _, test := range tests {
		// Fresh temp dir per case; removed at the end of a fully successful
		// iteration (failed cases `continue` and leave the dir for inspection).
		buildDir, err := ioutil.TempDir(util.GetBaseDir(), "dockerfile-path")
		if err != nil {
			t.Errorf("failed to create tmpdir: %v", err)
			continue
		}
		absoluteDockerfilePath := filepath.Join(buildDir, test.contextDir, test.dockerfilePath)
		if err = os.MkdirAll(filepath.Dir(absoluteDockerfilePath), os.FileMode(0750)); err != nil {
			t.Errorf("failed to create directory %s: %v", filepath.Dir(absoluteDockerfilePath), err)
			continue
		}
		if err = ioutil.WriteFile(absoluteDockerfilePath, []byte(from), os.FileMode(0644)); err != nil {
			t.Errorf("failed to write dockerfile to %s: %v", absoluteDockerfilePath, err)
			continue
		}
		// Build object matching the values asserted in `expected` above.
		build := &api.Build{
			Spec: api.BuildSpec{
				CommonSpec: api.CommonSpec{
					Source: api.BuildSource{
						Git: &api.GitBuildSource{
							URI: "http://github.com/openshift/origin.git",
						},
						ContextDir: test.contextDir,
					},
					Strategy: api.BuildStrategy{
						DockerStrategy: test.dockerStrategy,
					},
					Output: api.BuildOutput{
						To: &kapi.ObjectReference{
							Kind: "DockerImage",
							Name: "test/test-result:latest",
						},
					},
				},
			},
		}
		build.Name = "name"
		build.Namespace = "namespace"

		// Git commit metadata that addBuildParameters turns into labels.
		sourceInfo := &git.SourceInfo{}
		sourceInfo.AuthorName = "test user"
		sourceInfo.AuthorEmail = "*****@*****.**"
		sourceInfo.Date = "date"
		sourceInfo.CommitID = "commitid"
		sourceInfo.Ref = "ref"
		sourceInfo.Message = "message"

		// Fake docker client that asserts the Dockerfile path handed to the
		// image build matches this test case's expectation.
		dockerClient := &FakeDocker{
			buildImageFunc: func(opts docker.BuildImageOptions) error {
				if opts.Dockerfile != test.dockerfilePath {
					t.Errorf("Unexpected dockerfile path: %s (expected: %s)", opts.Dockerfile, test.dockerfilePath)
				}
				return nil
			},
		}
		dockerBuilder := &DockerBuilder{
			dockerClient: dockerClient,
			build:        build,
			gitClient:    git.NewRepository(),
			tar:          tar.New(s2iutil.NewFileSystem()),
		}
		// this will validate that the Dockerfile is readable
		// and append some labels to the Dockerfile
		if err = dockerBuilder.addBuildParameters(buildDir, sourceInfo); err != nil {
			t.Errorf("failed to add build parameters: %v", err)
			continue
		}
		// check that our Dockerfile has been modified
		dockerfileData, err := ioutil.ReadFile(absoluteDockerfilePath)
		if err != nil {
			t.Errorf("failed to read dockerfile %s: %v", absoluteDockerfilePath, err)
			continue
		}
		for _, value := range expected {
			if !strings.Contains(string(dockerfileData), value) {
				t.Errorf("Updated Dockerfile content does not contain expected value:\n%s\n\nUpdated content:\n%s\n", value, string(dockerfileData))
			}
		}
		// check that the docker client is called with the right Dockerfile parameter
		if err = dockerBuilder.dockerBuild(buildDir, "", []api.SecretBuildSource{}); err != nil {
			t.Errorf("failed to build: %v", err)
			continue
		}
		os.RemoveAll(buildDir)
	}
}