//setBuildAnnotationAndLabel set annotations and label info of this build func setBuildAnnotationAndLabel(bcCopy *buildapi.BuildConfig, build *buildapi.Build) { if build.Annotations == nil { build.Annotations = make(map[string]string) } //bcCopy.Status.LastVersion has been increased build.Annotations[buildapi.BuildNumberAnnotation] = strconv.FormatInt(bcCopy.Status.LastVersion, 10) build.Annotations[buildapi.BuildConfigAnnotation] = bcCopy.Name if build.Labels == nil { build.Labels = make(map[string]string) } build.Labels[buildapi.BuildConfigLabelDeprecated] = buildapi.LabelValue(bcCopy.Name) build.Labels[buildapi.BuildConfigLabel] = buildapi.LabelValue(bcCopy.Name) build.Labels[buildapi.BuildRunPolicyLabel] = string(bcCopy.Spec.RunPolicy) }
func TestHandleHandlePipelineBuildDeletionOK(t *testing.T) { deleteWasCalled := false build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) build.Spec.Strategy.JenkinsPipelineStrategy = &buildapi.JenkinsPipelineBuildStrategy{} ctrl := BuildDeleteController{&customPodManager{ GetPodFunc: func(namespace, names string) (*kapi.Pod, error) { return &kapi.Pod{ObjectMeta: kapi.ObjectMeta{ Labels: map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, Annotations: map[string]string{buildapi.BuildAnnotation: build.Name}, }}, nil }, DeletePodFunc: func(namespace string, pod *kapi.Pod) error { deleteWasCalled = true return nil }, }} err := ctrl.HandleBuildDeletion(build) if err != nil { t.Errorf("Unexpected error %v", err) } if deleteWasCalled { t.Error("DeletePod was called when it should not have been!") } }
// makeDeprecatedBuild returns a Build in the "default" namespace carrying the
// deprecated BuildConfig label. NOTE(review): configName is not a parameter
// here — it is presumably a package-level fixture; verify against this file's
// other declarations.
func makeDeprecatedBuild(version int) buildapi.Build {
	meta := kapi.ObjectMeta{
		Name:        fmt.Sprintf("build-%d", version),
		Namespace:   "default",
		Labels:      map[string]string{buildapi.BuildConfigLabelDeprecated: buildapi.LabelValue(configName)},
		Annotations: map[string]string{buildapi.BuildConfigAnnotation: configName},
	}
	return buildapi.Build{ObjectMeta: meta}
}
// makeBuild returns a Build in the "default" namespace whose name and UID are
// derived from version, labeled with the current BuildConfig label for configName.
func makeBuild(configName string, version int) buildapi.Build {
	name := fmt.Sprintf("build-%d", version)
	return buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{
			Name:        name,
			UID:         ktypes.UID(name),
			Namespace:   "default",
			Labels:      map[string]string{buildapi.BuildConfigLabel: buildapi.LabelValue(configName)},
			Annotations: map[string]string{buildapi.BuildConfigAnnotation: configName},
		},
	}
}
func TestHandleHandleBuildDeletionDeletePodError(t *testing.T) { build := mockBuild(buildapi.BuildPhaseComplete, buildapi.BuildOutput{}) ctrl := BuildDeleteController{&customPodManager{ GetPodFunc: func(namespace, names string) (*kapi.Pod, error) { return &kapi.Pod{ObjectMeta: kapi.ObjectMeta{ Labels: map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, Annotations: map[string]string{buildapi.BuildAnnotation: build.Name}, }}, nil }, DeletePodFunc: func(namespace string, pod *kapi.Pod) error { return errors.New("random") }, }} err := ctrl.HandleBuildDeletion(build) if err == nil { t.Error("Expected random error got none!") } }
func TestDockerCreateBuildPod(t *testing.T) { strategy := DockerBuildStrategy{ Image: "docker-test-image", Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), } expected := mockDockerBuild() actual, err := strategy.CreateBuildPod(expected) if err != nil { t.Errorf("Unexpected error: %v", err) } if expected, actual := buildapi.GetBuildPodName(expected), actual.ObjectMeta.Name; expected != actual { t.Errorf("Expected %s, but got %s!", expected, actual) } if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(expected.Name)}, actual.Labels) { t.Errorf("Pod Labels does not match Build Labels!") } container := actual.Spec.Containers[0] if container.Name != "docker-build" { t.Errorf("Expected docker-build, but got %s!", container.Name) } if container.Image != strategy.Image { t.Errorf("Expected %s image, got %s!", container.Image, strategy.Image) } if container.ImagePullPolicy != kapi.PullIfNotPresent { t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy) } if actual.Spec.RestartPolicy != kapi.RestartPolicyNever { t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy) } if len(container.Env) != 10 { var keys []string for _, env := range container.Env { keys = append(keys, env.Name) } t.Fatalf("Expected 10 elements in Env table, got %d:\n%s", len(container.Env), strings.Join(keys, ", ")) } if len(container.VolumeMounts) != 4 { t.Fatalf("Expected 4 volumes in container, got %d", len(container.VolumeMounts)) } if *actual.Spec.ActiveDeadlineSeconds != 60 { t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds) } for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, DockerPullSecretMountPath, sourceSecretMountPath} { if container.VolumeMounts[i].MountPath != expected { t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath) } } if len(actual.Spec.Volumes) != 4 { t.Fatalf("Expected 4 volumes in Build 
pod, got %d", len(actual.Spec.Volumes)) } if !kapi.Semantic.DeepEqual(container.Resources, expected.Spec.Resources) { t.Fatalf("Expected actual=expected, %v != %v", container.Resources, expected.Spec.Resources) } found := false foundIllegal := false for _, v := range container.Env { if v.Name == "BUILD_LOGLEVEL" && v.Value == "bar" { found = true } if v.Name == "ILLEGAL" { foundIllegal = true } } if !found { t.Fatalf("Expected variable BUILD_LOGLEVEL be defined for the container") } if foundIllegal { t.Fatalf("Found illegal environment variable 'ILLEGAL' defined on container") } buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), expected) errorCases := map[int][]string{ 0: {"BUILD", string(buildJSON)}, } for index, exp := range errorCases { if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] { t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value) } } }
// getPodLabels creates labels for the Build Pod func getPodLabels(build *buildapi.Build) map[string]string { return map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)} }
// TestCustomCreateBuildPod verifies pod creation for a custom-strategy build:
// an empty From image must fail, and a valid build must produce a pod with the
// expected name, labels, node selector, container settings, volumes, resources
// and environment (including the serialized BUILD variable and the standard
// source/output variables).
func TestCustomCreateBuildPod(t *testing.T) {
	strategy := CustomBuildStrategy{
		Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion),
	}

	// An empty image name in the strategy From must be rejected.
	expectedBad := mockCustomBuild(false, false)
	expectedBad.Spec.Strategy.CustomStrategy.From = kapi.ObjectReference{
		Kind: "DockerImage",
		Name: "",
	}
	if _, err := strategy.CreateBuildPod(expectedBad); err == nil {
		t.Errorf("Expected error when Image is empty, got nothing")
	}

	build := mockCustomBuild(false, false)
	actual, err := strategy.CreateBuildPod(build)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if expected, actual := buildapi.GetBuildPodName(build), actual.ObjectMeta.Name; expected != actual {
		t.Errorf("Expected %s, but got %s!", expected, actual)
	}
	if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, actual.Labels) {
		t.Errorf("Pod Labels does not match Build Labels!")
	}
	// nodeSelector is a fixture defined elsewhere in this file.
	if !reflect.DeepEqual(nodeSelector, actual.Spec.NodeSelector) {
		t.Errorf("Pod NodeSelector does not match Build NodeSelector. Expected: %v, got: %v", nodeSelector, actual.Spec.NodeSelector)
	}
	container := actual.Spec.Containers[0]
	if container.Name != "custom-build" {
		t.Errorf("Expected custom-build, but got %s!", container.Name)
	}
	if container.ImagePullPolicy != kapi.PullIfNotPresent {
		t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy)
	}
	if actual.Spec.RestartPolicy != kapi.RestartPolicyNever {
		t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy)
	}
	if len(container.VolumeMounts) != 3 {
		t.Fatalf("Expected 3 volumes in container, got %d", len(container.VolumeMounts))
	}
	if *actual.Spec.ActiveDeadlineSeconds != 60 {
		t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds)
	}
	// Mount order matters: docker socket, push secret, source secret.
	for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, sourceSecretMountPath} {
		if container.VolumeMounts[i].MountPath != expected {
			t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath)
		}
	}
	if !kapi.Semantic.DeepEqual(container.Resources, build.Spec.Resources) {
		t.Fatalf("Expected actual=expected, %v != %v", container.Resources, build.Spec.Resources)
	}
	if len(actual.Spec.Volumes) != 3 {
		t.Fatalf("Expected 3 volumes in Build pod, got %d", len(actual.Spec.Volumes))
	}
	// Env[0] must be the BUILD variable carrying the serialized build object.
	buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), build)
	errorCases := map[int][]string{
		0: {"BUILD", string(buildJSON)},
	}
	// Standard env vars every custom build container must receive, each with a
	// non-empty value.
	standardEnv := []string{"SOURCE_REPOSITORY", "SOURCE_URI", "SOURCE_CONTEXT_DIR", "SOURCE_REF", "OUTPUT_IMAGE", "OUTPUT_REGISTRY", buildapi.OriginVersion}
	for index, exp := range errorCases {
		if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] {
			t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value)
		}
	}
	for _, name := range standardEnv {
		found := false
		for _, item := range container.Env {
			if (item.Name == name) && len(item.Value) != 0 {
				found = true
			}
		}
		if !found {
			t.Errorf("Expected %s variable to be set", name)
		}
	}
}
// generateBuildFromConfig generates a build definition based on the current imageid
// from any ImageStream that is associated to the BuildConfig by From reference in
// the Strategy, or uses the Image field of the Strategy. If binary is provided, override
// the current build strategy with a binary artifact for this specific build.
// Takes a BuildConfig to base the build on, and an optional SourceRevision to build.
func (g *BuildGenerator) generateBuildFromConfig(ctx kapi.Context, bc *buildapi.BuildConfig, revision *buildapi.SourceRevision, binary *buildapi.BinaryBuildSource) (*buildapi.Build, error) {
	// Service account resolution order: BuildConfig > generator default > bootstrap builder SA.
	serviceAccount := bc.Spec.ServiceAccount
	if len(serviceAccount) == 0 {
		serviceAccount = g.DefaultServiceAccountName
	}
	if len(serviceAccount) == 0 {
		serviceAccount = bootstrappolicy.BuilderServiceAccountName
	}

	// Need to copy the buildConfig here so that it doesn't share pointers with
	// the build object which could be (will be) modified later.
	obj, _ := kapi.Scheme.Copy(bc)
	bcCopy := obj.(*buildapi.BuildConfig)
	// NOTE(review): since bcCopy is a deep copy of bc, the mixed use of bc and
	// bcCopy for read-only fields below is equivalent; only fields that are
	// mutated later must come from bcCopy.
	build := &buildapi.Build{
		Spec: buildapi.BuildSpec{
			CommonSpec: buildapi.CommonSpec{
				ServiceAccount:            serviceAccount,
				Source:                    bcCopy.Spec.Source,
				Strategy:                  bcCopy.Spec.Strategy,
				Output:                    bcCopy.Spec.Output,
				Revision:                  revision,
				Resources:                 bcCopy.Spec.Resources,
				PostCommit:                bcCopy.Spec.PostCommit,
				CompletionDeadlineSeconds: bcCopy.Spec.CompletionDeadlineSeconds,
			},
		},
		ObjectMeta: kapi.ObjectMeta{
			Labels: bcCopy.Labels,
		},
		Status: buildapi.BuildStatus{
			Phase: buildapi.BuildPhaseNew,
			Config: &kapi.ObjectReference{
				Kind:      "BuildConfig",
				Name:      bc.Name,
				Namespace: bc.Namespace,
			},
		},
	}

	// A binary source replaces any git source; a dockerfile source is dropped
	// when the uploaded artifact is itself the Dockerfile.
	if binary != nil {
		build.Spec.Source.Git = nil
		build.Spec.Source.Binary = binary
		if build.Spec.Source.Dockerfile != nil && binary.AsFile == "Dockerfile" {
			build.Spec.Source.Dockerfile = nil
		}
	} else {
		// must explicitly set this because we copied the source values from the buildconfig.
		build.Spec.Source.Binary = nil
	}

	build.Name = getNextBuildName(bc)
	if build.Annotations == nil {
		build.Annotations = make(map[string]string)
	}
	// NOTE(review): this assumes getNextBuildName has advanced
	// bc.Status.LastVersion to this build's number — confirm against its
	// definition elsewhere in the package.
	build.Annotations[buildapi.BuildNumberAnnotation] = strconv.FormatInt(bc.Status.LastVersion, 10)
	build.Annotations[buildapi.BuildConfigAnnotation] = bcCopy.Name
	if build.Labels == nil {
		build.Labels = make(map[string]string)
	}
	build.Labels[buildapi.BuildConfigLabelDeprecated] = buildapi.LabelValue(bcCopy.Name)
	build.Labels[buildapi.BuildConfigLabel] = buildapi.LabelValue(bcCopy.Name)
	build.Labels[buildapi.BuildRunPolicyLabel] = string(bc.Spec.RunPolicy)

	builderSecrets, err := g.FetchServiceAccountSecrets(bc.Namespace, serviceAccount)
	if err != nil {
		return nil, err
	}
	// Only fill in a push secret when the config did not specify one.
	if build.Spec.Output.PushSecret == nil {
		build.Spec.Output.PushSecret = g.resolveImageSecret(ctx, builderSecrets, build.Spec.Output.To, bc.Namespace)
	}
	strategyImageChangeTrigger := getStrategyImageChangeTrigger(bc)

	// Resolve image source if present
	for i, sourceImage := range build.Spec.Source.Images {
		if sourceImage.PullSecret == nil {
			sourceImage.PullSecret = g.resolveImageSecret(ctx, builderSecrets, &sourceImage.From, bc.Namespace)
		}

		var sourceImageSpec string
		// if the imagesource matches the strategy from, and we have a trigger for the strategy from,
		// use the imageid from the trigger rather than resolving it.
		if strategyFrom := buildutil.GetInputReference(bc.Spec.Strategy); reflect.DeepEqual(sourceImage.From, *strategyFrom) && strategyImageChangeTrigger != nil {
			sourceImageSpec = strategyImageChangeTrigger.LastTriggeredImageID
		} else {
			refImageChangeTrigger := getImageChangeTriggerForRef(bc, &sourceImage.From)
			// if there is no trigger associated with this imagesource, resolve the imagesource reference now.
			// otherwise use the imageid from the imagesource trigger.
			if refImageChangeTrigger == nil {
				sourceImageSpec, err = g.resolveImageStreamReference(ctx, sourceImage.From, bc.Namespace)
				if err != nil {
					return nil, err
				}
			} else {
				sourceImageSpec = refImageChangeTrigger.LastTriggeredImageID
			}
		}

		// Rewrite the reference to the concrete image spec; sourceImage is a
		// copy, so write it back into the slice.
		sourceImage.From.Kind = "DockerImage"
		sourceImage.From.Name = sourceImageSpec
		sourceImage.From.Namespace = ""
		build.Spec.Source.Images[i] = sourceImage
	}

	// If the Build is using a From reference instead of a resolved image, we need to resolve that From
	// reference to a valid image so we can run the build. Builds do not consume ImageStream references,
	// only image specs.
	var image string
	if strategyImageChangeTrigger != nil {
		image = strategyImageChangeTrigger.LastTriggeredImageID
	}
	switch {
	case build.Spec.Strategy.SourceStrategy != nil:
		if image == "" {
			image, err = g.resolveImageStreamReference(ctx, build.Spec.Strategy.SourceStrategy.From, build.Status.Config.Namespace)
			if err != nil {
				return nil, err
			}
		}
		build.Spec.Strategy.SourceStrategy.From = kapi.ObjectReference{
			Kind: "DockerImage",
			Name: image,
		}
		// A runtime image, when present, is resolved independently of the
		// builder image.
		if build.Spec.Strategy.SourceStrategy.RuntimeImage != nil {
			runtimeImageName, err := g.resolveImageStreamReference(ctx, *build.Spec.Strategy.SourceStrategy.RuntimeImage, build.Status.Config.Namespace)
			if err != nil {
				return nil, err
			}
			build.Spec.Strategy.SourceStrategy.RuntimeImage = &kapi.ObjectReference{
				Kind: "DockerImage",
				Name: runtimeImageName,
			}
		}
		if build.Spec.Strategy.SourceStrategy.PullSecret == nil {
			// we have 3 different variations:
			// 1) builder and runtime images use the same secret => use builder image secret
			// 2) builder and runtime images use different secrets => use builder image secret
			// 3) builder doesn't need a secret but runtime image requires it => use runtime image secret
			// The case when both of the images don't use secret (equals to nil) is covered by the first variant.
			pullSecret := g.resolveImageSecret(ctx, builderSecrets, &build.Spec.Strategy.SourceStrategy.From, bc.Namespace)
			if pullSecret == nil {
				pullSecret = g.resolveImageSecret(ctx, builderSecrets, build.Spec.Strategy.SourceStrategy.RuntimeImage, bc.Namespace)
			}
			build.Spec.Strategy.SourceStrategy.PullSecret = pullSecret
		}
	case build.Spec.Strategy.DockerStrategy != nil && build.Spec.Strategy.DockerStrategy.From != nil:
		if image == "" {
			image, err = g.resolveImageStreamReference(ctx, *build.Spec.Strategy.DockerStrategy.From, build.Status.Config.Namespace)
			if err != nil {
				return nil, err
			}
		}
		build.Spec.Strategy.DockerStrategy.From = &kapi.ObjectReference{
			Kind: "DockerImage",
			Name: image,
		}
		if build.Spec.Strategy.DockerStrategy.PullSecret == nil {
			build.Spec.Strategy.DockerStrategy.PullSecret = g.resolveImageSecret(ctx, builderSecrets, build.Spec.Strategy.DockerStrategy.From, bc.Namespace)
		}
	case build.Spec.Strategy.CustomStrategy != nil:
		if image == "" {
			image, err = g.resolveImageStreamReference(ctx, build.Spec.Strategy.CustomStrategy.From, build.Status.Config.Namespace)
			if err != nil {
				return nil, err
			}
		}
		build.Spec.Strategy.CustomStrategy.From = kapi.ObjectReference{
			Kind: "DockerImage",
			Name: image,
		}
		if build.Spec.Strategy.CustomStrategy.PullSecret == nil {
			build.Spec.Strategy.CustomStrategy.PullSecret = g.resolveImageSecret(ctx, builderSecrets, &build.Spec.Strategy.CustomStrategy.From, bc.Namespace)
		}
		// Expose the resolved image to the custom builder via its environment.
		updateCustomImageEnv(build.Spec.Strategy.CustomStrategy, image)
	}
	return build, nil
}
func testSTICreateBuildPod(t *testing.T, rootAllowed bool) { strategy := &SourceBuildStrategy{ Image: "sti-test-image", Codec: kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), AdmissionControl: &FakeAdmissionControl{admit: rootAllowed}, } expected := mockSTIBuild() actual, err := strategy.CreateBuildPod(expected) if err != nil { t.Errorf("Unexpected error: %v", err) } if expected, actual := buildapi.GetBuildPodName(expected), actual.ObjectMeta.Name; expected != actual { t.Errorf("Expected %s, but got %s!", expected, actual) } if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(expected.Name)}, actual.Labels) { t.Errorf("Pod Labels does not match Build Labels!") } container := actual.Spec.Containers[0] if container.Name != "sti-build" { t.Errorf("Expected sti-build, but got %s!", container.Name) } if container.Image != strategy.Image { t.Errorf("Expected %s image, got %s!", container.Image, strategy.Image) } if container.ImagePullPolicy != kapi.PullIfNotPresent { t.Errorf("Expected %v, got %v", kapi.PullIfNotPresent, container.ImagePullPolicy) } if actual.Spec.RestartPolicy != kapi.RestartPolicyNever { t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy) } // strategy ENV is whitelisted into the container environment, and not all // the values are allowed, so only expect 10 not 11 values. 
expectedEnvCount := 10 if !rootAllowed { expectedEnvCount = 12 } if len(container.Env) != expectedEnvCount { var keys []string for _, env := range container.Env { keys = append(keys, env.Name) } t.Fatalf("Expected 11 elements in Env table, got %d:\n%s", len(container.Env), strings.Join(keys, ", ")) } if len(container.VolumeMounts) != 4 { t.Fatalf("Expected 4 volumes in container, got %d", len(container.VolumeMounts)) } for i, expected := range []string{dockerSocketPath, DockerPushSecretMountPath, DockerPullSecretMountPath, sourceSecretMountPath} { if container.VolumeMounts[i].MountPath != expected { t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath) } } if len(actual.Spec.Volumes) != 4 { t.Fatalf("Expected 4 volumes in Build pod, got %d", len(actual.Spec.Volumes)) } if *actual.Spec.ActiveDeadlineSeconds != 60 { t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds) } if !kapi.Semantic.DeepEqual(container.Resources, expected.Spec.Resources) { t.Fatalf("Expected actual=expected, %v != %v", container.Resources, expected.Spec.Resources) } found := false foundIllegal := false foundAllowedUIDs := false foundDropCaps := false for _, v := range container.Env { if v.Name == "BUILD_LOGLEVEL" && v.Value == "bar" { found = true } if v.Name == "ILLEGAL" { foundIllegal = true } if v.Name == buildapi.AllowedUIDs && v.Value == "1-" { foundAllowedUIDs = true } if v.Name == buildapi.DropCapabilities && v.Value == "KILL,MKNOD,SETGID,SETUID,SYS_CHROOT" { foundDropCaps = true } } if !found { t.Fatalf("Expected variable BUILD_LOGLEVEL be defined for the container") } if foundIllegal { t.Fatalf("Found illegal environment variable 'ILLEGAL' defined on container") } if foundAllowedUIDs && rootAllowed { t.Fatalf("Did not expect %s when root is allowed", buildapi.AllowedUIDs) } if !foundAllowedUIDs && !rootAllowed { t.Fatalf("Expected %s when root is not allowed", buildapi.AllowedUIDs) } if foundDropCaps && 
rootAllowed { t.Fatalf("Did not expect %s when root is allowed", buildapi.DropCapabilities) } if !foundDropCaps && !rootAllowed { t.Fatalf("Expected %s when root is not allowed", buildapi.DropCapabilities) } buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.SchemeGroupVersion), expected) errorCases := map[int][]string{ 0: {"BUILD", string(buildJSON)}, } for index, exp := range errorCases { if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] { t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value) } } }
// BuildConfigSelector returns a label Selector which can be used to find all // builds for a BuildConfig. func BuildConfigSelector(name string) labels.Selector { return labels.Set{buildapi.BuildConfigLabel: buildapi.LabelValue(name)}.AsSelector() }