// Describe returns the description of a build func (d *BuildDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { c := d.osClient.Builds(namespace) build, err := c.Get(name) if err != nil { return "", err } events, _ := d.kubeClient.Events(namespace).Search(build) if events == nil { events = &kapi.EventList{} } // get also pod events and merge it all into one list for describe if pod, err := d.kubeClient.Pods(namespace).Get(buildapi.GetBuildPodName(build)); err == nil { if podEvents, _ := d.kubeClient.Events(namespace).Search(pod); podEvents != nil { events.Items = append(events.Items, podEvents.Items...) } } return tabbedString(func(out *tabwriter.Writer) error { formatMeta(out, build.ObjectMeta) fmt.Fprintln(out, "") status := bold(build.Status.Phase) if build.Status.Message != "" { status += " (" + build.Status.Message + ")" } formatString(out, "Status", status) if build.Status.StartTimestamp != nil && !build.Status.StartTimestamp.IsZero() { formatString(out, "Started", build.Status.StartTimestamp.Time.Format(time.RFC1123)) } // Create the time object with second-level precision so we don't get // output like "duration: 1.2724395728934s" formatString(out, "Duration", describeBuildDuration(build)) if build.Status.Config != nil { formatString(out, "Build Config", build.Status.Config.Name) } formatString(out, "Build Pod", buildapi.GetBuildPodName(build)) describeCommonSpec(build.Spec.CommonSpec, out) describeBuildTriggerCauses(build.Spec.TriggeredBy, out) if settings.ShowEvents { kctl.DescribeEvents(events, out) } return nil }) }
// List lists all Pods associated with a Build. func (lw *buildPodDeleteLW) List(options kapi.ListOptions) (runtime.Object, error) { glog.V(5).Info("Checking for deleted build pods") buildList, err := lw.Client.Builds(kapi.NamespaceAll).List(options) if err != nil { glog.V(4).Infof("Failed to find any builds due to error %v", err) return nil, err } for _, build := range buildList.Items { glog.V(5).Infof("Found build %s/%s", build.Namespace, build.Name) if buildutil.IsBuildComplete(&build) { glog.V(5).Infof("Ignoring build %s/%s because it is complete", build.Namespace, build.Name) continue } if build.Spec.Strategy.JenkinsPipelineStrategy != nil { glog.V(5).Infof("Ignoring build %s/%s because it is a pipeline build", build.Namespace, build.Name) continue } pod, err := lw.KubeClient.Pods(build.Namespace).Get(buildapi.GetBuildPodName(&build)) if err != nil { if !kerrors.IsNotFound(err) { glog.V(4).Infof("Error getting pod for build %s/%s: %v", build.Namespace, build.Name, err) return nil, err } else { pod = nil } } else { if buildName := buildapi.GetBuildName(pod); buildName != build.Name { pod = nil } } if pod == nil { deletedPod := &kapi.Pod{ ObjectMeta: kapi.ObjectMeta{ Name: buildapi.GetBuildPodName(&build), Namespace: build.Namespace, }, } glog.V(4).Infof("No build pod found for build %s/%s, sending delete event for build pod", build.Namespace, build.Name) err := lw.store.Delete(deletedPod) if err != nil { glog.V(4).Infof("Error queuing delete event: %v", err) } } else { glog.V(5).Infof("Found build pod %s/%s for build %s", pod.Namespace, pod.Name, build.Name) } } return &kapi.PodList{}, nil }
// CancelBuild updates a build status to Cancelled, after its associated pod is deleted. func (bc *BuildController) CancelBuild(build *buildapi.Build) error { if !isBuildCancellable(build) { glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase) return nil } glog.V(4).Infof("Cancelling build %s/%s.", build.Namespace, build.Name) pod, err := bc.PodManager.GetPod(build.Namespace, buildapi.GetBuildPodName(build)) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("Failed to get pod for build %s/%s: %v", build.Namespace, build.Name, err) } } else { err := bc.PodManager.DeletePod(build.Namespace, pod) if err != nil && !errors.IsNotFound(err) { return fmt.Errorf("Couldn't delete build pod %s/%s: %v", build.Namespace, pod.Name, err) } } build.Status.Phase = buildapi.BuildPhaseCancelled build.Status.Reason = "" build.Status.Message = "" now := unversioned.Now() build.Status.CompletionTimestamp = &now if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil { return fmt.Errorf("Failed to update build %s/%s: %v", build.Namespace, build.Name, err) } glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name) return nil }
func (bs *SourceBuildStrategy) canRunAsRoot(build *buildapi.Build) bool { var rootUser int64 rootUser = 0 pod := &kapi.Pod{ ObjectMeta: kapi.ObjectMeta{ Name: buildapi.GetBuildPodName(build), Namespace: build.Namespace, }, Spec: kapi.PodSpec{ ServiceAccountName: build.Spec.ServiceAccount, Containers: []kapi.Container{ { Name: "sti-build", Image: bs.Image, SecurityContext: &kapi.SecurityContext{ RunAsUser: &rootUser, }, }, }, RestartPolicy: kapi.RestartPolicyNever, }, } userInfo := serviceaccount.UserInfo(build.Namespace, build.Spec.ServiceAccount, "") attrs := admission.NewAttributesRecord(pod, pod, kapi.Kind("Pod").WithVersion(""), pod.Namespace, pod.Name, kapi.Resource("pods").WithVersion(""), "", admission.Create, userInfo) err := bs.AdmissionControl.Admit(attrs) if err != nil { glog.V(2).Infof("Admit for root user returned error: %v", err) } return err == nil }
// HandleBuildDeletion deletes a build pod if the corresponding build has been deleted func (bc *BuildDeleteController) HandleBuildDeletion(build *buildapi.Build) error { glog.V(4).Infof("Handling deletion of build %s", build.Name) if build.Spec.Strategy.JenkinsPipelineStrategy != nil { glog.V(4).Infof("Ignoring build with jenkins pipeline strategy") return nil } podName := buildapi.GetBuildPodName(build) pod, err := bc.PodManager.GetPod(build.Namespace, podName) if err != nil && !errors.IsNotFound(err) { glog.V(2).Infof("Failed to find pod with name %s for build %s in namespace %s due to error: %v", podName, build.Name, build.Namespace, err) return err } if pod == nil { glog.V(2).Infof("Did not find pod with name %s for build %s in namespace %s", podName, build.Name, build.Namespace) return nil } if buildName := buildapi.GetBuildName(pod); buildName != build.Name { glog.V(2).Infof("Not deleting pod %s/%s because the build label %s does not match the build name %s", pod.Namespace, podName, buildName, build.Name) return nil } err = bc.PodManager.DeletePod(build.Namespace, pod) if err != nil && !errors.IsNotFound(err) { glog.V(2).Infof("Failed to delete pod %s/%s for build %s due to error: %v", build.Namespace, podName, build.Name, err) return err } return nil }
func runBuildPodAdmissionTest(t *testing.T, client *client.Client, kclientset *kclientset.Clientset, build *buildapi.Build) (*buildapi.Build, *kapi.Pod) { ns := testutil.Namespace() _, err := client.Builds(ns).Create(build) if err != nil { t.Fatalf("%v", err) } watchOpt := kapi.ListOptions{ FieldSelector: fields.OneTermEqualSelector( "metadata.name", buildapi.GetBuildPodName(build), ), } podWatch, err := kclientset.Core().Pods(ns).Watch(watchOpt) if err != nil { t.Fatalf("%v", err) } type resultObjs struct { build *buildapi.Build pod *kapi.Pod } result := make(chan resultObjs) defer podWatch.Stop() go func() { for e := range podWatch.ResultChan() { if e.Type == watchapi.Added { pod, ok := e.Object.(*kapi.Pod) if !ok { t.Fatalf("unexpected object: %v", e.Object) } build := (*buildtestutil.TestPod)(pod).GetBuild(t) result <- resultObjs{build: build, pod: pod} } } }() select { case <-time.After(buildPodAdmissionTestTimeout): t.Fatalf("timed out after %v", buildPodAdmissionTestTimeout) case objs := <-result: return objs.build, objs.pod } return nil, nil }
func runBuildDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) { buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{}) if err != nil { t.Fatalf("Couldn't subscribe to Builds %v", err) } defer buildWatch.Stop() created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild()) if err != nil { t.Fatalf("Couldn't create Build: %v", err) } podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion}) if err != nil { t.Fatalf("Couldn't subscribe to Pods %v", err) } defer podWatch.Stop() // wait for initial build event from the creation of the imagerepo with tag latest event := waitForWatch(t, "initial build added", buildWatch) if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } newBuild := event.Object.(*buildapi.Build) // initial pod creation for build event = waitForWatch(t, "build pod created", podWatch) if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } clusterAdminClient.Builds(testutil.Namespace()).Delete(newBuild.Name) event = waitForWatchType(t, "pod deleted due to build deleted", podWatch, watchapi.Deleted) if e, a := watchapi.Deleted, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } pod := event.Object.(*kapi.Pod) if expected := buildapi.GetBuildPodName(newBuild); pod.Name != expected { t.Fatalf("Expected pod %s to be deleted, but pod %s was deleted", expected, pod.Name) } }
// TestCustomCreateBuildPod exercises CustomBuildStrategy.CreateBuildPod: it
// verifies the error path for an empty From image, then checks the shape of
// the generated pod (name, labels, node selector, container settings,
// volumes, deadline, resources, and environment).
func TestCustomCreateBuildPod(t *testing.T) {
	strategy := CustomBuildStrategy{
		Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion),
	}

	// An empty From image must be rejected.
	expectedBad := mockCustomBuild(false, false)
	expectedBad.Spec.Strategy.CustomStrategy.From = kapi.ObjectReference{
		Kind: "DockerImage",
		Name: "",
	}
	if _, err := strategy.CreateBuildPod(expectedBad); err == nil {
		t.Errorf("Expected error when Image is empty, got nothing")
	}

	build := mockCustomBuild(false, false)
	actual, err := strategy.CreateBuildPod(build)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if expected, actual := buildapi.GetBuildPodName(build), actual.ObjectMeta.Name; expected != actual {
		t.Errorf("Expected %s, but got %s!", expected, actual)
	}
	if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, actual.Labels) {
		t.Errorf("Pod Labels does not match Build Labels!")
	}
	if !reflect.DeepEqual(nodeSelector, actual.Spec.NodeSelector) {
		t.Errorf("Pod NodeSelector does not match Build NodeSelector. Expected: %v, got: %v", nodeSelector, actual.Spec.NodeSelector)
	}

	container := actual.Spec.Containers[0]
	if container.Name != "custom-build" {
		t.Errorf("Expected custom-build, but got %s!", container.Name)
	}
	if container.ImagePullPolicy != kapi.PullIfNotPresent {
		t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy)
	}
	if actual.Spec.RestartPolicy != kapi.RestartPolicyNever {
		t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy)
	}
	if len(container.VolumeMounts) != 3 {
		t.Fatalf("Expected 3 volumes in container, got %d", len(container.VolumeMounts))
	}
	if *actual.Spec.ActiveDeadlineSeconds != 60 {
		t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds)
	}
	// Mount order is part of the contract: docker socket, push secret, source secret.
	for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, sourceSecretMountPath} {
		if container.VolumeMounts[i].MountPath != expected {
			t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath)
		}
	}
	if !kapi.Semantic.DeepEqual(container.Resources, build.Spec.Resources) {
		t.Fatalf("Expected actual=expected, %v != %v", container.Resources, build.Spec.Resources)
	}
	if len(actual.Spec.Volumes) != 3 {
		t.Fatalf("Expected 3 volumes in Build pod, got %d", len(actual.Spec.Volumes))
	}

	buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), build)
	// Env[0] must be the serialized build under the name BUILD.
	errorCases := map[int][]string{
		0: {"BUILD", string(buildJSON)},
	}
	standardEnv := []string{"SOURCE_REPOSITORY", "SOURCE_URI", "SOURCE_CONTEXT_DIR", "SOURCE_REF", "OUTPUT_IMAGE", "OUTPUT_REGISTRY", buildapi.OriginVersion}
	for index, exp := range errorCases {
		if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] {
			t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value)
		}
	}
	// Every standard source/output env var must be present and non-empty.
	for _, name := range standardEnv {
		found := false
		for _, item := range container.Env {
			if (item.Name == name) && len(item.Value) != 0 {
				found = true
			}
		}
		if !found {
			t.Errorf("Expected %s variable to be set", name)
		}
	}
}
// NOTE(review): this chunk is a fragment of a larger Ginkgo suite; the
// enclosing Describe (and the oc, sourceFixture, dockerFixture variables) are
// defined outside this view, and the second It below is cut off mid-body.
g.It("Source: should start a build and wait for the build failed and build pod being killed by kubelet", func() {
	g.By("calling oc create source-build")
	err := oc.Run("create").Args("-f", sourceFixture).Execute()
	o.Expect(err).NotTo(o.HaveOccurred())

	g.By("starting the source build with --wait flag and short timeout")
	br, err := exutil.StartBuildAndWait(oc, "source-build", "--wait")
	// start-build --wait must report the failure of the build it waited on.
	o.Expect(br.StartBuildErr).To(o.HaveOccurred())

	g.By("verifying the build status")
	// The build should have been attempted and ended in the Failed phase.
	o.Expect(br.BuildAttempt).To(o.BeTrue())
	o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed))

	g.By("verifying the build pod status")
	pod, err := oc.KubeREST().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build))
	o.Expect(err).NotTo(o.HaveOccurred())
	// The pod is expected to have been killed by the kubelet for exceeding
	// its deadline, which surfaces as PodFailed/DeadlineExceeded.
	o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(kapi.PodFailed))
	o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded"))
})
})

g.Describe("oc start-build docker-build --wait", func() {
	g.It("Docker: should start a build and wait for the build failed and build pod being killed by kubelet", func() {
		g.By("calling oc create docker-build")
		err := oc.Run("create").Args("-f", dockerFixture).Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		g.By("starting the docker build with --wait flag and short timeout")
// Get returns a streamer resource with the contents of the build log.
// Depending on the build's phase it may return an empty streamer (NoWait on a
// not-yet-started build), wait for the build to start, or reject the request
// for cancelled/errored builds.
func (r *REST) Get(ctx kapi.Context, name string, opts runtime.Object) (runtime.Object, error) {
	buildLogOpts, ok := opts.(*api.BuildLogOptions)
	if !ok {
		return nil, errors.NewBadRequest("did not get an expected options.")
	}
	if errs := validation.ValidateBuildLogOptions(buildLogOpts); len(errs) > 0 {
		return nil, errors.NewInvalid(api.Kind("BuildLogOptions"), "", errs)
	}
	obj, err := r.Getter.Get(ctx, name)
	if err != nil {
		return nil, err
	}
	build := obj.(*api.Build)
	// Previous redirects the request to the build one version earlier within
	// the same build config.
	if buildLogOpts.Previous {
		version := buildutil.VersionForBuild(build)
		// Use the previous version
		version--
		previousBuildName := buildutil.BuildNameForConfigVersion(buildutil.ConfigNameForBuild(build), version)
		previous, err := r.Getter.Get(ctx, previousBuildName)
		if err != nil {
			return nil, err
		}
		build = previous.(*api.Build)
	}
	switch build.Status.Phase {
	// Build has not launched, wait til it runs
	case api.BuildPhaseNew, api.BuildPhasePending:
		if buildLogOpts.NoWait {
			glog.V(4).Infof("Build %s/%s is in %s state. No logs to retrieve yet.", build.Namespace, build.Name, build.Status.Phase)
			// return empty content if not waiting for build
			return &genericrest.LocationStreamer{}, nil
		}
		glog.V(4).Infof("Build %s/%s is in %s state, waiting for Build to start", build.Namespace, build.Name, build.Status.Phase)
		latest, ok, err := registry.WaitForRunningBuild(r.Watcher, ctx, build, r.Timeout)
		if err != nil {
			return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for build %s to run: %v", build.Name, err))
		}
		// The build may have moved straight to a terminal phase while we
		// were waiting — in that case there are no logs to stream.
		switch latest.Status.Phase {
		case api.BuildPhaseError:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s encountered an error: %s", build.Name, buildutil.NoBuildLogsMessage))
		case api.BuildPhaseCancelled:
			return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled: %s", build.Name, buildutil.NoBuildLogsMessage))
		}
		if !ok {
			return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for build %s to start after %s", build.Name, r.Timeout), 1)
		}
	// The build was cancelled
	case api.BuildPhaseCancelled:
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled. %s", build.Name, buildutil.NoBuildLogsMessage))
	// An error occurred launching the build, return an error
	case api.BuildPhaseError:
		return nil, errors.NewBadRequest(fmt.Sprintf("build %s is in an error state. %s", build.Name, buildutil.NoBuildLogsMessage))
	}
	// The container should be the default build container, so setting it to blank
	buildPodName := api.GetBuildPodName(build)
	logOpts := api.BuildToPodLogOptions(buildLogOpts)
	location, transport, err := pod.LogLocation(r.PodGetter, r.ConnectionInfo, ctx, buildPodName, logOpts)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil, errors.NewNotFound(kapi.Resource("pod"), buildPodName)
		}
		return nil, errors.NewBadRequest(err.Error())
	}
	return &genericrest.LocationStreamer{
		Location:        location,
		Transport:       transport,
		ContentType:     "text/plain",
		Flush:           buildLogOpts.Follow,
		ResponseChecker: genericrest.NewGenericHttpResponseChecker(kapi.Resource("pod"), buildPodName),
	}, nil
}
func TestDockerCreateBuildPod(t *testing.T) { strategy := DockerBuildStrategy{ Image: "docker-test-image", Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), } expected := mockDockerBuild() actual, err := strategy.CreateBuildPod(expected) if err != nil { t.Errorf("Unexpected error: %v", err) } if expected, actual := buildapi.GetBuildPodName(expected), actual.ObjectMeta.Name; expected != actual { t.Errorf("Expected %s, but got %s!", expected, actual) } if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(expected.Name)}, actual.Labels) { t.Errorf("Pod Labels does not match Build Labels!") } container := actual.Spec.Containers[0] if container.Name != "docker-build" { t.Errorf("Expected docker-build, but got %s!", container.Name) } if container.Image != strategy.Image { t.Errorf("Expected %s image, got %s!", container.Image, strategy.Image) } if container.ImagePullPolicy != kapi.PullIfNotPresent { t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy) } if actual.Spec.RestartPolicy != kapi.RestartPolicyNever { t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy) } if len(container.Env) != 10 { var keys []string for _, env := range container.Env { keys = append(keys, env.Name) } t.Fatalf("Expected 10 elements in Env table, got %d:\n%s", len(container.Env), strings.Join(keys, ", ")) } if len(container.VolumeMounts) != 4 { t.Fatalf("Expected 4 volumes in container, got %d", len(container.VolumeMounts)) } if *actual.Spec.ActiveDeadlineSeconds != 60 { t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds) } for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, DockerPullSecretMountPath, sourceSecretMountPath} { if container.VolumeMounts[i].MountPath != expected { t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath) } } if len(actual.Spec.Volumes) != 4 { t.Fatalf("Expected 4 volumes in Build 
pod, got %d", len(actual.Spec.Volumes)) } if !kapi.Semantic.DeepEqual(container.Resources, expected.Spec.Resources) { t.Fatalf("Expected actual=expected, %v != %v", container.Resources, expected.Spec.Resources) } found := false foundIllegal := false for _, v := range container.Env { if v.Name == "BUILD_LOGLEVEL" && v.Value == "bar" { found = true } if v.Name == "ILLEGAL" { foundIllegal = true } } if !found { t.Fatalf("Expected variable BUILD_LOGLEVEL be defined for the container") } if foundIllegal { t.Fatalf("Found illegal environment variable 'ILLEGAL' defined on container") } buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), expected) errorCases := map[int][]string{ 0: {"BUILD", string(buildJSON)}, } for index, exp := range errorCases { if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] { t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value) } } }
func runBuildRunningPodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) { buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{}) if err != nil { t.Fatalf("Couldn't subscribe to Builds %v", err) } defer buildWatch.Stop() created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild()) if err != nil { t.Fatalf("Couldn't create Build: %v", err) } podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion}) if err != nil { t.Fatalf("Couldn't subscribe to Pods %v", err) } defer podWatch.Stop() // wait for initial build event from the creation of the imagerepo with tag latest event := waitForWatch(t, "initial build added", buildWatch) if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } newBuild := event.Object.(*buildapi.Build) buildName := newBuild.Name podName := newBuild.Name + "-build" // initial pod creation for build for { event = waitForWatch(t, "build pod created", podWatch) newPod := event.Object.(*kapi.Pod) if newPod.Name == podName { break } } if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } // throw away events from other builds, we only care about the new build // we just triggered for { event = waitForWatch(t, "build updated to pending", buildWatch) newBuild = event.Object.(*buildapi.Build) if newBuild.Name == buildName { break } } if e, a := watchapi.Modified, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } if newBuild.Status.Phase != buildapi.BuildPhasePending { t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase) } clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0)) event = waitForWatch(t, "build updated to error", buildWatch) if e, 
a := watchapi.Modified, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } newBuild = event.Object.(*buildapi.Build) if newBuild.Status.Phase != buildapi.BuildPhaseError { t.Fatalf("expected build status to be marked error, but was marked %s", newBuild.Status.Phase) } }
// TestConcurrentBuildPodControllers tests the lifecycle of a build pod when running multiple controllers.
// For each scenario it creates a build, waits for its pod to reach Pending,
// then drives the pod through the scenario's phases and asserts the build
// follows with the matching build phase.
func TestConcurrentBuildPodControllers(t *testing.T) {
	// Start a master with multiple BuildPodControllers
	osClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t)
	ns := testutil.Namespace()
	waitTime := ConcurrentBuildPodControllersTestWait

	tests := []buildControllerPodTest{
		{
			Name: "running state test",
			States: []buildControllerPodState{
				{
					PodPhase:   kapi.PodRunning,
					BuildPhase: buildapi.BuildPhaseRunning,
				},
			},
		},
		{
			Name: "build succeeded",
			States: []buildControllerPodState{
				{
					PodPhase:   kapi.PodRunning,
					BuildPhase: buildapi.BuildPhaseRunning,
				},
				{
					PodPhase:   kapi.PodSucceeded,
					BuildPhase: buildapi.BuildPhaseComplete,
				},
			},
		},
		{
			Name: "build failed",
			States: []buildControllerPodState{
				{
					PodPhase:   kapi.PodRunning,
					BuildPhase: buildapi.BuildPhaseRunning,
				},
				{
					PodPhase:   kapi.PodFailed,
					BuildPhase: buildapi.BuildPhaseFailed,
				},
			},
		},
	}
	for _, test := range tests {
		// Setup communications channels
		podReadyChan := make(chan *kapi.Pod) // Will receive a value when a build pod is ready
		errChan := make(chan error)          // Will receive a value when an error occurs
		stateReached := int32(0)             // set atomically by the watch goroutine below

		// Create a build
		b, err := osClient.Builds(ns).Create(mockBuild())
		checkErr(t, err)

		// Watch build pod for transition to pending
		podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))})
		checkErr(t, err)
		go func() {
			for e := range podWatch.ResultChan() {
				pod, ok := e.Object.(*kapi.Pod)
				if !ok {
					// NOTE(review): checkErr presumably fails the test; if it
					// calls t.Fatalf from this goroutine that is unsafe —
					// verify against its definition.
					checkErr(t, fmt.Errorf("%s: unexpected object received: %#v\n", test.Name, e.Object))
				}
				if pod.Status.Phase == kapi.PodPending {
					podReadyChan <- pod
					break
				}
			}
		}()

		var pod *kapi.Pod
		select {
		case pod = <-podReadyChan:
			if pod.Status.Phase != kapi.PodPending {
				t.Errorf("Got wrong pod phase: %s", pod.Status.Phase)
				podWatch.Stop()
				continue
			}
		case <-time.After(BuildControllersWatchTimeout):
			t.Errorf("Timed out waiting for build pod to be ready")
			podWatch.Stop()
			continue
		}
		podWatch.Stop()

		for _, state := range test.States {
			// Update pod state and verify that corresponding build state happens accordingly
			pod, err := kClient.Pods(ns).Get(pod.Name)
			checkErr(t, err)
			pod.Status.Phase = state.PodPhase
			_, err = kClient.Pods(ns).UpdateStatus(pod)
			checkErr(t, err)

			buildWatch, err := osClient.Builds(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", b.Name), ResourceVersion: b.ResourceVersion})
			checkErr(t, err)
			// NOTE(review): this defer runs at function exit, not at the end
			// of the loop iteration, so watches accumulate until the test
			// returns; the explicit Stop calls in the select below normally
			// release them earlier.
			defer buildWatch.Stop()
			go func() {
				done := false
				for e := range buildWatch.ResultChan() {
					var ok bool
					b, ok = e.Object.(*buildapi.Build)
					if !ok {
						errChan <- fmt.Errorf("%s: unexpected object received: %#v", test.Name, e.Object)
					}
					if e.Type != watchapi.Modified {
						errChan <- fmt.Errorf("%s: unexpected event received: %s, object: %#v", test.Name, e.Type, e.Object)
					}
					// Any further modification after the desired phase was
					// reached is reported as an unexpected state.
					if done {
						errChan <- fmt.Errorf("%s: unexpected build state: %#v", test.Name, e.Object)
					} else if b.Status.Phase == state.BuildPhase {
						done = true
						atomic.StoreInt32(&stateReached, 1)
					}
				}
			}()

			select {
			case err := <-errChan:
				buildWatch.Stop()
				t.Errorf("%s: Error: %v\n", test.Name, err)
				break
			case <-time.After(waitTime):
				buildWatch.Stop()
				if atomic.LoadInt32(&stateReached) != 1 {
					t.Errorf("%s: Did not reach desired build state: %s", test.Name, state.BuildPhase)
					break
				}
			}
		}
	}
}
func (h *binaryInstantiateHandler) handle(r io.Reader) (runtime.Object, error) { h.options.Name = h.name if err := rest.BeforeCreate(BinaryStrategy, h.ctx, h.options); err != nil { glog.Infof("failed to validate binary: %#v", h.options) return nil, err } request := &buildapi.BuildRequest{} request.Name = h.name if len(h.options.Commit) > 0 { request.Revision = &buildapi.SourceRevision{ Git: &buildapi.GitSourceRevision{ Committer: buildapi.SourceControlUser{ Name: h.options.CommitterName, Email: h.options.CommitterEmail, }, Author: buildapi.SourceControlUser{ Name: h.options.AuthorName, Email: h.options.AuthorEmail, }, Message: h.options.Message, Commit: h.options.Commit, }, } } request.Binary = &buildapi.BinaryBuildSource{ AsFile: h.options.AsFile, } var build *buildapi.Build start := time.Now() if err := wait.Poll(time.Second, h.r.Timeout, func() (bool, error) { result, err := h.r.Generator.Instantiate(h.ctx, request) if err != nil { if errors.IsNotFound(err) { if s, ok := err.(errors.APIStatus); ok { if s.Status().Kind == "imagestreamtags" { return false, nil } } } glog.V(2).Infof("failed to instantiate: %#v", request) return false, err } build = result return true, nil }); err != nil { return nil, err } remaining := h.r.Timeout - time.Now().Sub(start) latest, ok, err := registry.WaitForRunningBuild(h.r.Watcher, h.ctx, build, remaining) if err != nil { switch { case latest.Status.Phase == buildapi.BuildPhaseError: return nil, errors.NewBadRequest(fmt.Sprintf("build %s encountered an error: %s", build.Name, buildutil.NoBuildLogsMessage)) case latest.Status.Phase == buildapi.BuildPhaseCancelled: return nil, errors.NewBadRequest(fmt.Sprintf("build %s was cancelled: %s", build.Name, buildutil.NoBuildLogsMessage)) case err == registry.ErrBuildDeleted: return nil, errors.NewBadRequest(fmt.Sprintf("build %s was deleted before it started: %s", build.Name, buildutil.NoBuildLogsMessage)) default: return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for build %s to 
run: %v", build.Name, err)) } } if !ok { return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for build %s to start after %s", build.Name, h.r.Timeout), 0) } if latest.Status.Phase != buildapi.BuildPhaseRunning { return nil, errors.NewBadRequest(fmt.Sprintf("cannot upload file to build %s with status %s", build.Name, latest.Status.Phase)) } // The container should be the default build container, so setting it to blank buildPodName := buildapi.GetBuildPodName(build) opts := &kapi.PodAttachOptions{ Stdin: true, } location, transport, err := pod.AttachLocation(h.r.PodGetter, h.r.ConnectionInfo, h.ctx, buildPodName, opts) if err != nil { if errors.IsNotFound(err) { return nil, errors.NewNotFound(kapi.Resource("pod"), buildPodName) } return nil, errors.NewBadRequest(err.Error()) } rawTransport, ok := transport.(*http.Transport) if !ok { return nil, errors.NewInternalError(fmt.Errorf("unable to connect to node, unrecognized type: %v", reflect.TypeOf(transport))) } upgrader := spdy.NewRoundTripper(rawTransport.TLSClientConfig) exec, err := remotecommand.NewStreamExecutor(upgrader, nil, "POST", location) if err != nil { return nil, errors.NewInternalError(fmt.Errorf("unable to connect to server: %v", err)) } streamOptions := remotecommand.StreamOptions{ SupportedProtocols: kubeletremotecommand.SupportedStreamingProtocols, Stdin: r, } if err := exec.Stream(streamOptions); err != nil { return nil, errors.NewInternalError(err) } return latest, nil }
func testSTICreateBuildPod(t *testing.T, rootAllowed bool) { strategy := &SourceBuildStrategy{ Image: "sti-test-image", Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), AdmissionControl: &FakeAdmissionControl{admit: rootAllowed}, } expected := mockSTIBuild() actual, err := strategy.CreateBuildPod(expected) if err != nil { t.Errorf("Unexpected error: %v", err) } if expected, actual := buildapi.GetBuildPodName(expected), actual.ObjectMeta.Name; expected != actual { t.Errorf("Expected %s, but got %s!", expected, actual) } if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(expected.Name)}, actual.Labels) { t.Errorf("Pod Labels does not match Build Labels!") } container := actual.Spec.Containers[0] if container.Name != "sti-build" { t.Errorf("Expected sti-build, but got %s!", container.Name) } if container.Image != strategy.Image { t.Errorf("Expected %s image, got %s!", container.Image, strategy.Image) } if container.ImagePullPolicy != kapi.PullIfNotPresent { t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy) } if actual.Spec.RestartPolicy != kapi.RestartPolicyNever { t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy) } // strategy ENV is whitelisted into the container environment, and not all // the values are allowed, so only expect 10 not 11 values. 
expectedEnvCount := 10 if !rootAllowed { expectedEnvCount = 12 } if len(container.Env) != expectedEnvCount { var keys []string for _, env := range container.Env { keys = append(keys, env.Name) } t.Fatalf("Expected 11 elements in Env table, got %d:\n%s", len(container.Env), strings.Join(keys, ", ")) } if len(container.VolumeMounts) != 4 { t.Fatalf("Expected 4 volumes in container, got %d", len(container.VolumeMounts)) } for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, DockerPullSecretMountPath, sourceSecretMountPath} { if container.VolumeMounts[i].MountPath != expected { t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath) } } if len(actual.Spec.Volumes) != 4 { t.Fatalf("Expected 4 volumes in Build pod, got %d", len(actual.Spec.Volumes)) } if *actual.Spec.ActiveDeadlineSeconds != 60 { t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds) } if !kapi.Semantic.DeepEqual(container.Resources, expected.Spec.Resources) { t.Fatalf("Expected actual=expected, %v != %v", container.Resources, expected.Spec.Resources) } found := false foundIllegal := false foundAllowedUIDs := false foundDropCaps := false for _, v := range container.Env { if v.Name == "BUILD_LOGLEVEL" && v.Value == "bar" { found = true } if v.Name == "ILLEGAL" { foundIllegal = true } if v.Name == buildapi.AllowedUIDs && v.Value == "1-" { foundAllowedUIDs = true } if v.Name == buildapi.DropCapabilities && v.Value == "KILL,MKNOD,SETGID,SETUID,SYS_CHROOT" { foundDropCaps = true } } if !found { t.Fatalf("Expected variable BUILD_LOGLEVEL be defined for the container") } if foundIllegal { t.Fatalf("Found illegal environment variable 'ILLEGAL' defined on container") } if foundAllowedUIDs && rootAllowed { t.Fatalf("Did not expect %s when root is allowed", buildapi.AllowedUIDs) } if !foundAllowedUIDs && !rootAllowed { t.Fatalf("Expected %s when root is not allowed", buildapi.AllowedUIDs) } if foundDropCaps && 
rootAllowed { t.Fatalf("Did not expect %s when root is allowed", buildapi.DropCapabilities) } if !foundDropCaps && !rootAllowed { t.Fatalf("Expected %s when root is not allowed", buildapi.DropCapabilities) } buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), expected) errorCases := map[int][]string{ 0: {"BUILD", string(buildJSON)}, } for index, exp := range errorCases { if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] { t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value) } } }
// CreateBuildPod creates the pod to be used for the Custom build func (bs *CustomBuildStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { strategy := build.Spec.Strategy.CustomStrategy if strategy == nil { return nil, errors.New("CustomBuildStrategy cannot be executed without CustomStrategy parameters") } codec := bs.Codec if len(strategy.BuildAPIVersion) != 0 { gv, err := unversioned.ParseGroupVersion(strategy.BuildAPIVersion) if err != nil { return nil, FatalError(fmt.Sprintf("failed to parse buildAPIVersion specified in custom build strategy (%q): %v", strategy.BuildAPIVersion, err)) } codec = kapi.Codecs.LegacyCodec(gv) } data, err := runtime.Encode(codec, build) if err != nil { return nil, fmt.Errorf("failed to encode the build: %v", err) } containerEnv := []kapi.EnvVar{{Name: "BUILD", Value: string(data)}} if build.Spec.Source.Git != nil { addSourceEnvVars(build.Spec.Source, &containerEnv) } addOriginVersionVar(&containerEnv) if build.Spec.Output.To != nil { addOutputEnvVars(build.Spec.Output.To, &containerEnv) if err != nil { return nil, fmt.Errorf("failed to parse the output docker tag %q: %v", build.Spec.Output.To.Name, err) } } if len(strategy.From.Name) == 0 { return nil, errors.New("CustomBuildStrategy cannot be executed without image") } if len(strategy.Env) > 0 { containerEnv = append(containerEnv, strategy.Env...) 
} if strategy.ExposeDockerSocket { glog.V(2).Infof("ExposeDockerSocket is enabled for %s build", build.Name) containerEnv = append(containerEnv, kapi.EnvVar{Name: "DOCKER_SOCKET", Value: dockerSocketPath}) } privileged := true pod := &kapi.Pod{ ObjectMeta: kapi.ObjectMeta{ Name: buildapi.GetBuildPodName(build), Namespace: build.Namespace, Labels: getPodLabels(build), }, Spec: kapi.PodSpec{ ServiceAccountName: build.Spec.ServiceAccount, Containers: []kapi.Container{ { Name: "custom-build", Image: strategy.From.Name, Env: containerEnv, // TODO: run unprivileged https://github.com/openshift/origin/issues/662 SecurityContext: &kapi.SecurityContext{ Privileged: &privileged, }, }, }, RestartPolicy: kapi.RestartPolicyNever, }, } if build.Spec.CompletionDeadlineSeconds != nil { pod.Spec.ActiveDeadlineSeconds = build.Spec.CompletionDeadlineSeconds } if !strategy.ForcePull { pod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent } else { glog.V(2).Infof("ForcePull is enabled for %s build", build.Name) pod.Spec.Containers[0].ImagePullPolicy = kapi.PullAlways } pod.Spec.Containers[0].Resources = build.Spec.Resources if build.Spec.Source.Binary != nil { pod.Spec.Containers[0].Stdin = true pod.Spec.Containers[0].StdinOnce = true } if strategy.ExposeDockerSocket { setupDockerSocket(pod) setupDockerSecrets(pod, build.Spec.Output.PushSecret, strategy.PullSecret, build.Spec.Source.Images) } setupSourceSecrets(pod, build.Spec.Source.SourceSecret) setupSecrets(pod, build.Spec.Source.Secrets) setupAdditionalSecrets(pod, build.Spec.Strategy.CustomStrategy.Secrets) return pod, nil }
// CreateBuildPod creates the pod to be used for the Docker build // TODO: Make the Pod definition configurable func (bs *DockerBuildStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { data, err := runtime.Encode(bs.Codec, build) if err != nil { return nil, fmt.Errorf("failed to encode the build: %v", err) } privileged := true strategy := build.Spec.Strategy.DockerStrategy containerEnv := []kapi.EnvVar{ {Name: "BUILD", Value: string(data)}, {Name: "BUILD_LOGLEVEL", Value: fmt.Sprintf("%d", cmdutil.GetLogLevel())}, } addSourceEnvVars(build.Spec.Source, &containerEnv) addOriginVersionVar(&containerEnv) if len(strategy.Env) > 0 { mergeTrustedEnvWithoutDuplicates(strategy.Env, &containerEnv) } pod := &kapi.Pod{ ObjectMeta: kapi.ObjectMeta{ Name: buildapi.GetBuildPodName(build), Namespace: build.Namespace, Labels: getPodLabels(build), }, Spec: kapi.PodSpec{ ServiceAccountName: build.Spec.ServiceAccount, Containers: []kapi.Container{ { Name: "docker-build", Image: bs.Image, Env: containerEnv, Args: []string{"--loglevel=" + getContainerVerbosity(containerEnv)}, // TODO: run unprivileged https://github.com/openshift/origin/issues/662 SecurityContext: &kapi.SecurityContext{ Privileged: &privileged, }, }, }, RestartPolicy: kapi.RestartPolicyNever, }, } pod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent pod.Spec.Containers[0].Resources = build.Spec.Resources if build.Spec.CompletionDeadlineSeconds != nil { pod.Spec.ActiveDeadlineSeconds = build.Spec.CompletionDeadlineSeconds } if build.Spec.Source.Binary != nil { pod.Spec.Containers[0].Stdin = true pod.Spec.Containers[0].StdinOnce = true } setupDockerSocket(pod) setupDockerSecrets(pod, build.Spec.Output.PushSecret, strategy.PullSecret, build.Spec.Source.Images) setupSourceSecrets(pod, build.Spec.Source.SourceSecret) setupSecrets(pod, build.Spec.Source.Secrets) return pod, nil }
func runBuildCompletePodDeleteTest(t *testing.T, clusterAdminClient *client.Client, clusterAdminKubeClient *kclient.Client) { buildWatch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{}) if err != nil { t.Fatalf("Couldn't subscribe to Builds %v", err) } defer buildWatch.Stop() created, err := clusterAdminClient.Builds(testutil.Namespace()).Create(mockBuild()) if err != nil { t.Fatalf("Couldn't create Build: %v", err) } podWatch, err := clusterAdminKubeClient.Pods(testutil.Namespace()).Watch(kapi.ListOptions{ResourceVersion: created.ResourceVersion}) if err != nil { t.Fatalf("Couldn't subscribe to Pods %v", err) } defer podWatch.Stop() // wait for initial build event from the creation of the imagerepo with tag latest event := waitForWatch(t, "initial build added", buildWatch) if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } newBuild := event.Object.(*buildapi.Build) // initial pod creation for build event = waitForWatch(t, "build pod created", podWatch) if e, a := watchapi.Added, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } event = waitForWatch(t, "build updated to pending", buildWatch) if e, a := watchapi.Modified, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } newBuild = event.Object.(*buildapi.Build) if newBuild.Status.Phase != buildapi.BuildPhasePending { t.Fatalf("expected build status to be marked pending, but was marked %s", newBuild.Status.Phase) } newBuild.Status.Phase = buildapi.BuildPhaseComplete clusterAdminClient.Builds(testutil.Namespace()).Update(newBuild) event = waitForWatch(t, "build updated to complete", buildWatch) if e, a := watchapi.Modified, event.Type; e != a { t.Fatalf("expected watch event type %s, got %s", e, a) } newBuild = event.Object.(*buildapi.Build) if newBuild.Status.Phase != buildapi.BuildPhaseComplete { t.Fatalf("expected build status to be marked complete, but was marked %s", 
newBuild.Status.Phase) } clusterAdminKubeClient.Pods(testutil.Namespace()).Delete(buildapi.GetBuildPodName(newBuild), kapi.NewDeleteOptions(0)) time.Sleep(10 * time.Second) newBuild, err = clusterAdminClient.Builds(testutil.Namespace()).Get(newBuild.Name) if err != nil { t.Fatalf("unexpected error %v", err) } if newBuild.Status.Phase != buildapi.BuildPhaseComplete { t.Fatalf("build status was updated to %s after deleting pod, should have stayed as %s", newBuild.Status.Phase, buildapi.BuildPhaseComplete) } }
// CreateBuildPod creates a pod that will execute the STI build // TODO: Make the Pod definition configurable func (bs *SourceBuildStrategy) CreateBuildPod(build *buildapi.Build) (*kapi.Pod, error) { data, err := runtime.Encode(bs.Codec, build) if err != nil { return nil, fmt.Errorf("failed to encode the Build %s/%s: %v", build.Namespace, build.Name, err) } containerEnv := []kapi.EnvVar{ {Name: "BUILD", Value: string(data)}, } addSourceEnvVars(build.Spec.Source, &containerEnv) addOriginVersionVar(&containerEnv) strategy := build.Spec.Strategy.SourceStrategy if len(strategy.Env) > 0 { mergeTrustedEnvWithoutDuplicates(strategy.Env, &containerEnv) } // check if can run container as root if !bs.canRunAsRoot(build) { // TODO: both AllowedUIDs and DropCapabilities should // be controlled via the SCC that's in effect for the build service account // For now, both are hard-coded based on whether the build service account can // run as root. containerEnv = append(containerEnv, kapi.EnvVar{Name: buildapi.AllowedUIDs, Value: "1-"}) containerEnv = append(containerEnv, kapi.EnvVar{Name: buildapi.DropCapabilities, Value: strings.Join(DefaultDropCaps, ",")}) } privileged := true pod := &kapi.Pod{ ObjectMeta: kapi.ObjectMeta{ Name: buildapi.GetBuildPodName(build), Namespace: build.Namespace, Labels: getPodLabels(build), }, Spec: kapi.PodSpec{ ServiceAccountName: build.Spec.ServiceAccount, Containers: []kapi.Container{ { Name: "sti-build", Image: bs.Image, Env: containerEnv, // TODO: run unprivileged https://github.com/openshift/origin/issues/662 SecurityContext: &kapi.SecurityContext{ Privileged: &privileged, }, Args: []string{}, }, }, RestartPolicy: kapi.RestartPolicyNever, }, } pod.Spec.Containers[0].ImagePullPolicy = kapi.PullIfNotPresent pod.Spec.Containers[0].Resources = build.Spec.Resources if build.Spec.CompletionDeadlineSeconds != nil { pod.Spec.ActiveDeadlineSeconds = build.Spec.CompletionDeadlineSeconds } if build.Spec.Source.Binary != nil { pod.Spec.Containers[0].Stdin 
= true pod.Spec.Containers[0].StdinOnce = true } setupDockerSocket(pod) setupDockerSecrets(pod, build.Spec.Output.PushSecret, strategy.PullSecret, build.Spec.Source.Images) setupSourceSecrets(pod, build.Spec.Source.SourceSecret) setupSecrets(pod, build.Spec.Source.Secrets) return pod, nil }
// TestConcurrentBuildControllers tests the transition of a build from new to pending. Ensures that only a single New -> Pending // transition happens and that only a single pod is created during a set period of time. func TestConcurrentBuildControllers(t *testing.T) { // Start a master with multiple BuildControllers osClient, kClient := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t) // Setup an error channel errChan := make(chan error) // go routines will send a message on this channel if an error occurs. Once this happens the test is over // Create a build ns := testutil.Namespace() b, err := osClient.Builds(ns).Create(mockBuild()) checkErr(t, err) // Start watching builds for New -> Pending transition buildWatch, err := osClient.Builds(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", b.Name), ResourceVersion: b.ResourceVersion}) checkErr(t, err) defer buildWatch.Stop() buildModifiedCount := int32(0) go func() { for e := range buildWatch.ResultChan() { if e.Type != watchapi.Modified { errChan <- fmt.Errorf("received an unexpected event of type: %s with object: %#v", e.Type, e.Object) } build, ok := e.Object.(*buildapi.Build) if !ok { errChan <- fmt.Errorf("received something other than build: %#v", e.Object) break } // If unexpected status, throw error if build.Status.Phase != buildapi.BuildPhasePending && build.Status.Phase != buildapi.BuildPhaseNew { errChan <- fmt.Errorf("received unexpected build status: %s", build.Status.Phase) break } atomic.AddInt32(&buildModifiedCount, 1) } }() // Watch build pods as they are created podWatch, err := kClient.Pods(ns).Watch(kapi.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", buildapi.GetBuildPodName(b))}) checkErr(t, err) defer podWatch.Stop() podAddedCount := int32(0) go func() { for e := range podWatch.ResultChan() { // Look for creation events if e.Type == watchapi.Added { atomic.AddInt32(&podAddedCount, 1) } } }() select { case err := 
<-errChan: t.Errorf("Error: %v", err) case <-time.After(ConcurrentBuildControllersTestWait): if atomic.LoadInt32(&buildModifiedCount) < 1 { t.Errorf("The build was modified an unexpected number of times. Got: %d, Expected: >= 1", buildModifiedCount) } if atomic.LoadInt32(&podAddedCount) != 1 { t.Errorf("The build pod was created an unexpected number of times. Got: %d, Expected: 1", podAddedCount) } } }
// GetBuildPodName returns name of the build pod.
//
// Deprecated: use buildapi.GetBuildPodName instead; this wrapper only
// delegates to the api package and is kept for backward compatibility.
// TODO: remove in favor of the one in the api package
func GetBuildPodName(build *buildapi.Build) string {
	return buildapi.GetBuildPodName(build)
}
g.It("Source: should start a build and wait for the build failed and build pod being killed by kubelet", func() { g.By("calling oc create source-build") err := oc.Run("create").Args("-f", sourceFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting the source build with --wait flag and short timeout") br, err := exutil.StartBuildAndWait(oc, "source-build", "--wait") o.Expect(br.StartBuildErr).To(o.HaveOccurred()) // start-build should detect the build error g.By("verifying the build status") o.Expect(br.BuildAttempt).To(o.BeTrue()) // the build should have been attempted o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildapi.BuildPhaseFailed)) // the build should have failed g.By("verifying the build pod status") pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(buildapi.GetBuildPodName(br.Build)) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(kapi.PodFailed)) o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded")) }) }) g.Describe("oc start-build docker-build --wait", func() { g.It("Docker: should start a build and wait for the build failed and build pod being killed by kubelet", func() { g.By("calling oc create docker-build") err := oc.Run("create").Args("-f", dockerFixture).Execute() o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting the docker build with --wait flag and short timeout")