// restoreSpecsFromContainerLabels restores all information needed for killing a container. In some
// cases we may not have the pod and container spec when killing a container, e.g. the pod is deleted
// during kubelet restart.
// To solve this problem, we've already written the necessary information into container labels. Here we
// just need to retrieve it from the container labels and restore the specs.
// TODO(random-liu): Add a node e2e test to test this behaviour.
// TODO(random-liu): Change the lifecycle handler to just accept the information needed, so that we can
// just pass the needed function rather than create the fake object.
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*api.Pod, *api.Container, error) {
	var pod *api.Pod
	var container *api.Container
	s, err := m.runtimeService.ContainerStatus(containerID.ID)
	if err != nil {
		return nil, nil, err
	}

	l := getContainerInfoFromLabels(s.Labels)
	a := getContainerInfoFromAnnotations(s.Annotations)
	// Notice that the following are not full specs. The container killing code should not use
	// un-restored fields.
	pod = &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:                        l.PodUID,
			Name:                       l.PodName,
			Namespace:                  l.PodNamespace,
			DeletionGracePeriodSeconds: a.PodDeletionGracePeriod,
		},
		Spec: api.PodSpec{
			TerminationGracePeriodSeconds: a.PodTerminationGracePeriod,
		},
	}
	container = &api.Container{
		Name:                   l.ContainerName,
		Ports:                  a.ContainerPorts,
		TerminationMessagePath: a.TerminationMessagePath,
	}
	if a.PreStopHandler != nil {
		container.Lifecycle = &api.Lifecycle{
			PreStop: a.PreStopHandler,
		}
	}
	return pod, container, nil
}
func fakeDeploymentConfig(name string, containers ...containerDesc) *deployapi.DeploymentConfig {
	specContainers := []kapi.Container{}
	for _, c := range containers {
		container := kapi.Container{
			Name: c.name,
		}

		container.Ports = []kapi.ContainerPort{}
		for _, p := range c.ports {
			container.Ports = append(container.Ports, kapi.ContainerPort{
				Name:          fmt.Sprintf("port-%d-%s", p.port, p.protocol),
				ContainerPort: p.port,
				Protocol:      kapi.Protocol(p.protocol),
			})
		}

		specContainers = append(specContainers, container)
	}
	return &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{
			Name: name,
		},
		Spec: deployapi.DeploymentConfigSpec{
			Replicas: 1,
			Selector: map[string]string{"name": "test"},
			Template: &kapi.PodTemplateSpec{
				Spec: kapi.PodSpec{
					Containers: specContainers,
				},
			},
		},
	}
}
func admissionTestPod() *kapi.Pod {
	pod := &kapi.Pod{}
	pod.Name = "test-pod"

	container := kapi.Container{}
	container.Name = "foo"
	container.Image = "openshift/hello-openshift"
	pod.Spec.Containers = []kapi.Container{container}

	return pod
}
// resolveContainerSecurityContext checks the provided container against the provider, returning any
// validation errors encountered on the resulting security context, or the security context that was
// resolved. The SecurityContext field of the container is updated, so ensure that a copy of the original
// container is passed here if you wish to preserve the original input.
func resolveContainerSecurityContext(provider scc.SecurityContextConstraintsProvider, pod *kapi.Pod, container *kapi.Container, path *field.Path) (*kapi.SecurityContext, field.ErrorList) {
	// We will determine the effective security context for the container and validate against that
	// since that is how the sc provider will eventually apply settings in the runtime.
	// This results in an SC that is based on the Pod's PSC with the set fields from the container
	// overriding pod level settings.
	container.SecurityContext = sc.DetermineEffectiveSecurityContext(pod, container)

	csc, err := provider.CreateContainerSecurityContext(pod, container)
	if err != nil {
		return nil, field.ErrorList{field.Invalid(path.Child("securityContext"), "", err.Error())}
	}
	container.SecurityContext = csc
	return csc, provider.ValidateContainerSecurityContext(pod, container, path.Child("securityContext"))
}
// ToAPIPod converts Pod to api.Pod. Note that if a field in api.Pod has no
// corresponding field in Pod, the field would not be populated.
func (p *Pod) ToAPIPod() *api.Pod {
	var pod api.Pod
	pod.UID = p.ID
	pod.Name = p.Name
	pod.Namespace = p.Namespace

	for _, c := range p.Containers {
		var container api.Container
		container.Name = c.Name
		container.Image = c.Image
		pod.Spec.Containers = append(pod.Spec.Containers, container)
	}
	return &pod
}
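// A minimal usage sketch for ToAPIPod, assuming Pod is the runtime pod type
// whose ID, Name, Namespace, and Containers fields are read above (with
// Containers as a slice of *Container); the literal values are illustrative only.
func exampleToAPIPod() {
	runtimePod := &Pod{
		ID:        "12345678-90ab-cdef-1234-567890abcdef",
		Name:      "nginx",
		Namespace: "default",
		Containers: []*Container{
			{Name: "web", Image: "nginx:1.9"},
		},
	}
	apiPod := runtimePod.ToAPIPod()
	// Only the fields copied by ToAPIPod are populated; everything else in
	// api.Pod keeps its zero value.
	fmt.Println(apiPod.Name, apiPod.Spec.Containers[0].Image)
}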
func Containers(userContainers []interface{}) []api.Container {
	if len(userContainers) == 0 {
		return nil
	}

	var containers []api.Container
	for _, c := range userContainers {
		userContainer := c.(map[string]interface{})

		container := api.Container{
			Image: userContainer["image"].(string),
			Name:  userContainer["name"].(string),
		}

		if _, ok := userContainer["args"]; ok {
			container.Args = convertListToStringArray(userContainer["args"].([]interface{}))
		}

		if _, ok := userContainer["command"]; ok {
			container.Command = convertListToStringArray(userContainer["command"].([]interface{}))
		}

		if _, ok := userContainer["working_dir"]; ok {
			container.WorkingDir = userContainer["working_dir"].(string)
		}

		if _, ok := userContainer["ports"]; ok {
			container.Ports = ContainerPorts(userContainer["ports"].([]interface{}))
		}

		if _, ok := userContainer["env"]; ok {
			container.Env = EnvVar(userContainer["env"].([]interface{}))
		}

		if _, ok := userContainer["volume_mounts"]; ok {
			container.VolumeMounts = VolumeMounts(userContainer["volume_mounts"].([]interface{}))
		}

		if _, ok := userContainer["termination_message_path"]; ok {
			container.TerminationMessagePath = userContainer["termination_message_path"].(string)
		}

		if _, ok := userContainer["image_pull_policy"]; ok {
			container.ImagePullPolicy = api.PullPolicy(userContainer["image_pull_policy"].(string))
		}

		// TODO: populate these fields:
		// resources
		// liveness_probe
		// readiness_probe
		// lifecycle
		// security_context

		containers = append(containers, container)
	}
	return containers
}
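// A sketch of the []interface{} shape Containers expects, e.g. as produced by
// a decoded Terraform-style resource block. The snake_case keys mirror the
// lookups above; the values are illustrative only.
func exampleContainers() []api.Container {
	userContainers := []interface{}{
		map[string]interface{}{
			"name":              "web",
			"image":             "nginx:1.9",
			"command":           []interface{}{"nginx"},
			"args":              []interface{}{"-g", "daemon off;"},
			"working_dir":       "/usr/share/nginx/html",
			"image_pull_policy": "IfNotPresent",
		},
	}
	return Containers(userContainers)
}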
// NewContainer creates a new Entity for the provided kube.Container. Container must be valid.
func NewContainer(container kube.Container, defaults kube.ObjectMeta, source string, objects ...deploy.KubeObject) (*Container, error) {
	err := validateContainer(container)
	if err != nil {
		return nil, fmt.Errorf("could not create Container from `%s`: %v", source, err)
	}

	base, err := newBase(EntityContainer, defaults, source, objects)
	if err != nil {
		return nil, err
	}

	newContainer := Container{base: base}

	if len(container.Image) != 0 {
		image, err := image.FromString(container.Image)
		if err != nil {
			return nil, err
		}

		newContainer.image, err = NewImage(image, defaults, source)
		if err != nil {
			return nil, err
		}
		container.Image = "placeholder"
	}

	newContainer.container = container
	return &newContainer, nil
}
func getTestPod(probeType probeType, probeSpec api.Probe) api.Pod {
	container := api.Container{
		Name: containerName,
	}
	switch probeType {
	case readiness:
		container.ReadinessProbe = &probeSpec
	case liveness:
		container.LivenessProbe = &probeSpec
	}
	pod := api.Pod{
		Spec: api.PodSpec{
			Containers:    []api.Container{container},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	pod.UID = podUID
	return pod
}
func getTestPod(probeType probeType, probeSpec api.Probe) api.Pod {
	container := api.Container{
		Name: testContainerName,
	}

	// All tests rely on the fake exec prober.
	probeSpec.Handler = api.Handler{
		Exec: &api.ExecAction{},
	}

	// Apply test defaults, overridden for test speed.
	defaults := map[string]int64{
		"TimeoutSeconds":   1,
		"PeriodSeconds":    1,
		"SuccessThreshold": 1,
		"FailureThreshold": 1,
	}
	for field, value := range defaults {
		f := reflect.ValueOf(&probeSpec).Elem().FieldByName(field)
		if f.Int() == 0 {
			f.SetInt(value)
		}
	}

	switch probeType {
	case readiness:
		container.ReadinessProbe = &probeSpec
	case liveness:
		container.LivenessProbe = &probeSpec
	}

	pod := api.Pod{
		Spec: api.PodSpec{
			Containers:    []api.Container{container},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
	pod.Name = "testPod"
	pod.UID = testPodUID
	return pod
}
func buildContainers(userContainers []interface{}) []api.Container {
	if len(userContainers) == 0 {
		return nil
	}

	var containers []api.Container
	for _, c := range userContainers {
		userContainer := c.(map[string]interface{})

		container := api.Container{
			Image: userContainer["image"].(string),
			Name:  userContainer["name"].(string),
		}

		if _, ok := userContainer["args"]; ok {
			container.Args = convertListToStringArray(userContainer["args"].([]interface{}))
		}

		if _, ok := userContainer["command"]; ok {
			container.Command = convertListToStringArray(userContainer["command"].([]interface{}))
		}

		if _, ok := userContainer["working_dir"]; ok {
			container.WorkingDir = userContainer["working_dir"].(string)
		}

		if _, ok := userContainer["ports"]; ok {
			container.Ports = buildContainerPorts(userContainer["ports"].([]interface{}))
		}

		if _, ok := userContainer["env"]; ok {
			container.Env = buildEnvVar(userContainer["env"].([]interface{}))
		}

		containers = append(containers, container)
	}
	return containers
}
// EnsureContainerHasEnvVar updates the existing EnvVar of the given name with the given value,
// or adds a new entry if none exists.
// Returns true if the environment variable already existed.
func EnsureContainerHasEnvVar(container *api.Container, name string, value string) bool {
	for i := range container.Env {
		if container.Env[i].Name == name {
			// Update through the index; ranging by value would only mutate a copy.
			container.Env[i].Value = value
			return true
		}
	}
	container.Env = append(container.Env, api.EnvVar{
		Name:  name,
		Value: value,
	})
	return false
}
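// A usage sketch for EnsureContainerHasEnvVar covering both code paths; the
// variable names and values are illustrative only.
func exampleEnsureContainerHasEnvVar() {
	container := &api.Container{
		Env: []api.EnvVar{{Name: "LOG_LEVEL", Value: "info"}},
	}
	existed := EnsureContainerHasEnvVar(container, "LOG_LEVEL", "debug") // true: updated in place
	added := EnsureContainerHasEnvVar(container, "PORT", "8080")         // false: new entry appended
	fmt.Println(existed, added, container.Env)
}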
// EnsureContainerHasPreStopCommand ensures that the given container has a `preStop` lifecycle hook
// to invoke the given commands.
func EnsureContainerHasPreStopCommand(container *api.Container, commands []string) {
	if container.Lifecycle == nil {
		container.Lifecycle = &api.Lifecycle{}
	}
	lifecycle := container.Lifecycle
	if lifecycle.PreStop == nil {
		lifecycle.PreStop = &api.Handler{}
	}
	preStop := lifecycle.PreStop
	preStop.Exec = &api.ExecAction{
		Command: commands,
	}
}
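// A usage sketch for EnsureContainerHasPreStopCommand; the command is
// illustrative only. Calling the helper again simply overwrites the exec
// action, so it is safe to apply repeatedly.
func exampleEnsureContainerHasPreStopCommand() {
	container := &api.Container{Name: "web"}
	EnsureContainerHasPreStopCommand(container, []string{"/bin/sh", "-c", "nginx -s quit"})
	fmt.Println(container.Lifecycle.PreStop.Exec.Command)
}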
// EnsureContainerHasVolumeMount ensures that there is a volume mount of the given name with the
// given mount path.
// Returns true if the volume mount already existed.
func EnsureContainerHasVolumeMount(container *api.Container, name string, mountPath string) bool {
	for i := range container.VolumeMounts {
		if container.VolumeMounts[i].Name == name {
			// Update through the index; ranging by value would only mutate a copy.
			container.VolumeMounts[i].MountPath = mountPath
			return true
		}
	}
	container.VolumeMounts = append(container.VolumeMounts, api.VolumeMount{
		Name:      name,
		MountPath: mountPath,
	})
	return false
}
func (o *ProbeOptions) updateContainer(container *kapi.Container) {
	if o.Remove {
		if o.Readiness {
			container.ReadinessProbe = nil
		}
		if o.Liveness {
			container.LivenessProbe = nil
		}
		return
	}
	if o.Readiness {
		if container.ReadinessProbe == nil {
			container.ReadinessProbe = &kapi.Probe{}
		}
		o.updateProbe(container.ReadinessProbe)
	}
	if o.Liveness {
		if container.LivenessProbe == nil {
			container.LivenessProbe = &kapi.Probe{}
		}
		o.updateProbe(container.LivenessProbe)
	}
}
// EnsureContainerHasEnvVarFromField updates the existing EnvVar of the given name to reference the
// given fieldPath, or adds a new entry if none exists.
// Returns true if the environment variable already existed.
func EnsureContainerHasEnvVarFromField(container *api.Container, name string, fieldPath string) bool {
	from := &api.EnvVarSource{
		FieldRef: &api.ObjectFieldSelector{
			FieldPath: fieldPath,
		},
	}
	for i := range container.Env {
		if container.Env[i].Name == name {
			// Update through the index; ranging by value would only mutate a copy.
			container.Env[i].ValueFrom = from
			container.Env[i].Value = ""
			return true
		}
	}
	container.Env = append(container.Env, api.EnvVar{
		Name:      name,
		ValueFrom: from,
	})
	return false
}
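// A usage sketch for EnsureContainerHasEnvVarFromField, exposing the pod's
// namespace through the downward API; the variable name is illustrative,
// while "metadata.namespace" is a standard field path.
func exampleEnsureContainerHasEnvVarFromField() {
	container := &api.Container{}
	EnsureContainerHasEnvVarFromField(container, "POD_NAMESPACE", "metadata.namespace")
	fmt.Println(container.Env[0].ValueFrom.FieldRef.FieldPath)
}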
func convert_v1beta3_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*Container))(in)
	}
	out.Name = in.Name
	out.Image = in.Image
	if in.Command != nil {
		out.Command = make([]string, len(in.Command))
		for i := range in.Command {
			out.Command[i] = in.Command[i]
		}
	}
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		for i := range in.Args {
			out.Args[i] = in.Args[i]
		}
	}
	out.WorkingDir = in.WorkingDir
	if in.Ports != nil {
		out.Ports = make([]api.ContainerPort, len(in.Ports))
		for i := range in.Ports {
			if err := convert_v1beta3_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
				return err
			}
		}
	}
	if in.Env != nil {
		out.Env = make([]api.EnvVar, len(in.Env))
		for i := range in.Env {
			if err := convert_v1beta3_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
				return err
			}
		}
	}
	if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
		return err
	}
	if in.VolumeMounts != nil {
		out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
		for i := range in.VolumeMounts {
			if err := convert_v1beta3_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
				return err
			}
		}
	}
	if in.LivenessProbe != nil {
		out.LivenessProbe = new(api.Probe)
		if err := convert_v1beta3_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
			return err
		}
	} else {
		out.LivenessProbe = nil
	}
	if in.ReadinessProbe != nil {
		out.ReadinessProbe = new(api.Probe)
		if err := convert_v1beta3_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
			return err
		}
	} else {
		out.ReadinessProbe = nil
	}
	if in.Lifecycle != nil {
		out.Lifecycle = new(api.Lifecycle)
		if err := convert_v1beta3_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
			return err
		}
	} else {
		out.Lifecycle = nil
	}
	out.TerminationMessagePath = in.TerminationMessagePath
	out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
	if in.SecurityContext != nil {
		if in.SecurityContext.Capabilities != nil {
			if !reflect.DeepEqual(in.SecurityContext.Capabilities.Add, in.Capabilities.Add) ||
				!reflect.DeepEqual(in.SecurityContext.Capabilities.Drop, in.Capabilities.Drop) {
				return fmt.Errorf("container capability settings do not match security context settings, cannot convert")
			}
		}
		if in.SecurityContext.Privileged != nil {
			if in.Privileged != *in.SecurityContext.Privileged {
				return fmt.Errorf("container privileged settings do not match security context settings, cannot convert")
			}
		}
	}
	if in.SecurityContext != nil {
		out.SecurityContext = new(api.SecurityContext)
		if err := convert_v1beta3_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
			return err
		}
	} else {
		out.SecurityContext = nil
	}
	out.Stdin = in.Stdin
	out.TTY = in.TTY
	return nil
}
Describe("container runtime conformance blackbox test", func() { var testCContainers []ConformanceContainer namespace := "runtime-conformance" BeforeEach(func() { testCContainers = []ConformanceContainer{} }) Context("when start a container that exits successfully", func() { It("it should run with the expected status [Conformance]", func() { testContainer := api.Container{ Image: ImageRegistry[busyBoxImage], VolumeMounts: []api.VolumeMount{ { MountPath: "/restart-count", Name: "restart-count", }, }, ImagePullPolicy: api.PullIfNotPresent, } testVolumes := []api.Volume{ { Name: "restart-count", VolumeSource: api.VolumeSource{ HostPath: &api.HostPathVolumeSource{ Path: os.TempDir(), }, }, }, } testCount := int32(3)
	pollInterval = time.Second * 1
)

var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
	f := NewDefaultFramework("runtime-conformance")

	Describe("container runtime conformance blackbox test", func() {
		Context("when starting a container that exits", func() {
			It("it should run with the expected status [Conformance]", func() {
				restartCountVolumeName := "restart-count"
				restartCountVolumePath := "/restart-count"
				testContainer := api.Container{
					Image: ImageRegistry[busyBoxImage],
					VolumeMounts: []api.VolumeMount{
						{
							MountPath: restartCountVolumePath,
							Name:      restartCountVolumeName,
						},
					},
				}
				testVolumes := []api.Volume{
					{
						Name: restartCountVolumeName,
						VolumeSource: api.VolumeSource{
							HostPath: &api.HostPathVolumeSource{
								Path: os.TempDir(),
							},
						},
					},
				}
				testCases := []struct {
func deepCopy_api_Container(in api.Container, out *api.Container, c *conversion.Cloner) error {
	out.Name = in.Name
	out.Image = in.Image
	if in.Command != nil {
		out.Command = make([]string, len(in.Command))
		for i := range in.Command {
			out.Command[i] = in.Command[i]
		}
	} else {
		out.Command = nil
	}
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		for i := range in.Args {
			out.Args[i] = in.Args[i]
		}
	} else {
		out.Args = nil
	}
	out.WorkingDir = in.WorkingDir
	if in.Ports != nil {
		out.Ports = make([]api.ContainerPort, len(in.Ports))
		for i := range in.Ports {
			if err := deepCopy_api_ContainerPort(in.Ports[i], &out.Ports[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Ports = nil
	}
	if in.Env != nil {
		out.Env = make([]api.EnvVar, len(in.Env))
		for i := range in.Env {
			if err := deepCopy_api_EnvVar(in.Env[i], &out.Env[i], c); err != nil {
				return err
			}
		}
	} else {
		out.Env = nil
	}
	if err := deepCopy_api_ResourceRequirements(in.Resources, &out.Resources, c); err != nil {
		return err
	}
	if in.VolumeMounts != nil {
		out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
		for i := range in.VolumeMounts {
			if err := deepCopy_api_VolumeMount(in.VolumeMounts[i], &out.VolumeMounts[i], c); err != nil {
				return err
			}
		}
	} else {
		out.VolumeMounts = nil
	}
	if in.LivenessProbe != nil {
		out.LivenessProbe = new(api.Probe)
		if err := deepCopy_api_Probe(*in.LivenessProbe, out.LivenessProbe, c); err != nil {
			return err
		}
	} else {
		out.LivenessProbe = nil
	}
	if in.ReadinessProbe != nil {
		out.ReadinessProbe = new(api.Probe)
		if err := deepCopy_api_Probe(*in.ReadinessProbe, out.ReadinessProbe, c); err != nil {
			return err
		}
	} else {
		out.ReadinessProbe = nil
	}
	if in.Lifecycle != nil {
		out.Lifecycle = new(api.Lifecycle)
		if err := deepCopy_api_Lifecycle(*in.Lifecycle, out.Lifecycle, c); err != nil {
			return err
		}
	} else {
		out.Lifecycle = nil
	}
	out.TerminationMessagePath = in.TerminationMessagePath
	out.ImagePullPolicy = in.ImagePullPolicy
	if in.SecurityContext != nil {
		out.SecurityContext = new(api.SecurityContext)
		if err := deepCopy_api_SecurityContext(*in.SecurityContext, out.SecurityContext, c); err != nil {
			return err
		}
	} else {
		out.SecurityContext = nil
	}
	out.Stdin = in.Stdin
	out.TTY = in.TTY
	return nil
}
func TestEnforcingServiceAccount(t *testing.T) {
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	masterConfig.ServiceAccountConfig.LimitSecretReferences = false
	clusterAdminConfig, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get a service account token
	saToken, err := waitForServiceAccountToken(clusterAdminKubeClient, api.NamespaceDefault, serviceaccountadmission.DefaultServiceAccountName, 20, time.Second)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(saToken) == 0 {
		t.Errorf("token was not created")
	}

	pod := &api.Pod{}
	pod.Name = "foo"
	pod.Namespace = api.NamespaceDefault
	pod.Spec.ServiceAccountName = serviceaccountadmission.DefaultServiceAccountName

	container := api.Container{}
	container.Name = "foo"
	container.Image = "openshift/hello-openshift"
	pod.Spec.Containers = []api.Container{container}

	secretVolume := api.Volume{}
	secretVolume.Name = "bar-vol"
	secretVolume.Secret = &api.SecretVolumeSource{}
	secretVolume.Secret.SecretName = "bar"
	pod.Spec.Volumes = []api.Volume{secretVolume}

	err = wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		if _, err := clusterAdminKubeClient.Pods(api.NamespaceDefault).Create(pod); err != nil {
			// The SA admission controller cache seems to take forever to update. This check comes
			// after the limit check, so until we get it sorted out, check whether we're getting this
			// particular error.
			if strings.Contains(err.Error(), "no API token found for service account") {
				return true, nil
			}
			t.Log(err)
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	clusterAdminKubeClient.Pods(api.NamespaceDefault).Delete(pod.Name, nil)

	sa, err := clusterAdminKubeClient.ServiceAccounts(api.NamespaceDefault).Get(bootstrappolicy.DeployerServiceAccountName)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if sa.Annotations == nil {
		sa.Annotations = map[string]string{}
	}
	sa.Annotations[serviceaccountadmission.EnforceMountableSecretsAnnotation] = "true"
	// Give the admission controller cache time to observe the annotation.
	time.Sleep(5 * time.Second)
	_, err = clusterAdminKubeClient.ServiceAccounts(api.NamespaceDefault).Update(sa)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	expectedMessage := "is not allowed because service account deployer does not reference that secret"
	pod.Spec.ServiceAccountName = bootstrappolicy.DeployerServiceAccountName

	err = wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		if _, err := clusterAdminKubeClient.Pods(api.NamespaceDefault).Create(pod); err == nil || !strings.Contains(err.Error(), expectedMessage) {
			clusterAdminKubeClient.Pods(api.NamespaceDefault).Delete(pod.Name, nil)
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
// DeployApp deploys an app based on the given configuration. The app is deployed using the given
// client. App deployment consists of a replication controller and an optional service. Both of them
// share common labels.
func DeployApp(spec *AppDeploymentSpec, client client.Interface) error {
	log.Printf("Deploying %s application into %s namespace", spec.Name, spec.Namespace)

	annotations := map[string]string{}
	if spec.Description != nil {
		annotations[DescriptionAnnotationKey] = *spec.Description
	}
	labels := getLabelsMap(spec.Labels)
	objectMeta := api.ObjectMeta{
		Annotations: annotations,
		Name:        spec.Name,
		Labels:      labels,
	}

	containerSpec := api.Container{
		Name:  spec.Name,
		Image: spec.ContainerImage,
		SecurityContext: &api.SecurityContext{
			Privileged: &spec.RunAsPrivileged,
		},
		Resources: api.ResourceRequirements{
			Requests: make(map[api.ResourceName]resource.Quantity),
		},
		Env: convertEnvVarsSpec(spec.Variables),
	}

	if spec.ContainerCommand != nil {
		containerSpec.Command = []string{*spec.ContainerCommand}
	}
	if spec.ContainerCommandArgs != nil {
		containerSpec.Args = []string{*spec.ContainerCommandArgs}
	}

	if spec.CpuRequirement != nil {
		containerSpec.Resources.Requests[api.ResourceCPU] = *spec.CpuRequirement
	}
	if spec.MemoryRequirement != nil {
		containerSpec.Resources.Requests[api.ResourceMemory] = *spec.MemoryRequirement
	}

	podSpec := api.PodSpec{
		Containers: []api.Container{containerSpec},
	}
	if spec.ImagePullSecret != nil {
		podSpec.ImagePullSecrets = []api.LocalObjectReference{{Name: *spec.ImagePullSecret}}
	}

	podTemplate := &api.PodTemplateSpec{
		ObjectMeta: objectMeta,
		Spec:       podSpec,
	}

	replicationController := &api.ReplicationController{
		ObjectMeta: objectMeta,
		Spec: api.ReplicationControllerSpec{
			Replicas: spec.Replicas,
			Selector: labels,
			Template: podTemplate,
		},
	}

	_, err := client.ReplicationControllers(spec.Namespace).Create(replicationController)
	if err != nil {
		// TODO(bryk): Roll back created resources in case of error.
		return err
	}

	if len(spec.PortMappings) > 0 {
		service := &api.Service{
			ObjectMeta: objectMeta,
			Spec: api.ServiceSpec{
				Selector: labels,
			},
		}

		if spec.IsExternal {
			service.Spec.Type = api.ServiceTypeLoadBalancer
		} else {
			service.Spec.Type = api.ServiceTypeClusterIP
		}

		for _, portMapping := range spec.PortMappings {
			servicePort := api.ServicePort{
				Protocol: portMapping.Protocol,
				Port:     portMapping.Port,
				Name:     generatePortMappingName(portMapping),
				TargetPort: intstr.IntOrString{
					Type:   intstr.Int,
					IntVal: portMapping.TargetPort,
				},
			}
			service.Spec.Ports = append(service.Spec.Ports, servicePort)
		}

		_, err = client.Services(spec.Namespace).Create(service)
		// TODO(bryk): Roll back created resources in case of error.
		return err
	}
	return nil
}
func TestProbe(t *testing.T) {
	prober := &prober{
		refManager: kubecontainer.NewRefManager(),
		recorder:   &record.FakeRecorder{},
	}
	containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"}

	execProbe := &api.Probe{
		Handler: api.Handler{
			Exec: &api.ExecAction{},
		},
	}
	tests := []struct {
		probe          *api.Probe
		execError      bool
		expectError    bool
		execResult     probe.Result
		expectedResult results.Result
	}{
		{ // No probe
			probe:          nil,
			expectedResult: results.Success,
		},
		{ // No handler
			probe:          &api.Probe{},
			expectError:    true,
			expectedResult: results.Failure,
		},
		{ // Probe fails
			probe:          execProbe,
			execResult:     probe.Failure,
			expectedResult: results.Failure,
		},
		{ // Probe succeeds
			probe:          execProbe,
			execResult:     probe.Success,
			expectedResult: results.Success,
		},
		{ // Probe result is unknown
			probe:          execProbe,
			execResult:     probe.Unknown,
			expectedResult: results.Failure,
		},
		{ // Probe has an error
			probe:          execProbe,
			execError:      true,
			expectError:    true,
			execResult:     probe.Unknown,
			expectedResult: results.Failure,
		},
	}

	for i, test := range tests {
		for _, probeType := range [...]probeType{liveness, readiness} {
			testID := fmt.Sprintf("%d-%s", i, probeType)
			testContainer := api.Container{}
			switch probeType {
			case liveness:
				testContainer.LivenessProbe = test.probe
			case readiness:
				testContainer.ReadinessProbe = test.probe
			}
			if test.execError {
				prober.exec = fakeExecProber{test.execResult, errors.New("exec error")}
			} else {
				prober.exec = fakeExecProber{test.execResult, nil}
			}

			result, err := prober.probe(probeType, &api.Pod{}, api.PodStatus{}, testContainer, containerID)
			if test.expectError && err == nil {
				t.Errorf("[%s] Expected probe error but no error was returned.", testID)
			}
			if !test.expectError && err != nil {
				t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err)
			}
			if test.expectedResult != result {
				t.Errorf("[%s] Expected result to be %v but was %v", testID, test.expectedResult, result)
			}
		}
	}
}
// DeployApp deploys an app based on the given configuration. The app is deployed using the given
// client. App deployment consists of a replication controller and an optional service. Both of them
// share common labels.
// TODO(bryk): Write tests for this function.
func DeployApp(spec *AppDeploymentSpec, client client.Interface) error {
	annotations := map[string]string{}
	if spec.Description != nil {
		annotations[DescriptionAnnotationKey] = *spec.Description
	}
	labels := getLabelsMap(spec.Labels)
	objectMeta := api.ObjectMeta{
		Annotations: annotations,
		Name:        spec.Name,
		Labels:      labels,
	}

	containerSpec := api.Container{
		Name:  spec.Name,
		Image: spec.ContainerImage,
		SecurityContext: &api.SecurityContext{
			Privileged: &spec.RunAsPrivileged,
		},
	}

	if spec.ContainerCommand != nil {
		containerSpec.Command = []string{*spec.ContainerCommand}
	}
	if spec.ContainerCommandArgs != nil {
		containerSpec.Args = []string{*spec.ContainerCommandArgs}
	}

	podTemplate := &api.PodTemplateSpec{
		ObjectMeta: objectMeta,
		Spec: api.PodSpec{
			Containers: []api.Container{containerSpec},
		},
	}

	replicaSet := &api.ReplicationController{
		ObjectMeta: objectMeta,
		Spec: api.ReplicationControllerSpec{
			Replicas: spec.Replicas,
			Selector: labels,
			Template: podTemplate,
		},
	}

	_, err := client.ReplicationControllers(spec.Namespace).Create(replicaSet)
	if err != nil {
		// TODO(bryk): Roll back created resources in case of error.
		return err
	}

	if len(spec.PortMappings) > 0 {
		service := &api.Service{
			ObjectMeta: objectMeta,
			Spec: api.ServiceSpec{
				Selector: labels,
			},
		}

		if spec.IsExternal {
			service.Spec.Type = api.ServiceTypeLoadBalancer
		} else {
			service.Spec.Type = api.ServiceTypeNodePort
		}

		for _, portMapping := range spec.PortMappings {
			servicePort := api.ServicePort{
				Protocol: portMapping.Protocol,
				Port:     portMapping.Port,
				TargetPort: intstr.IntOrString{
					Type:   intstr.Int,
					IntVal: portMapping.TargetPort,
				},
			}
			service.Spec.Ports = append(service.Spec.Ports, servicePort)
		}

		_, err = client.Services(spec.Namespace).Create(service)
		// TODO(bryk): Roll back created resources in case of error.
		return err
	}
	return nil
}
func writePodContainer(m map[string]interface{}, item *api.Container) error {
	if x, ok := m["name"].(string); ok {
		item.Name = x
	}
	if x, ok := m["image"].(string); ok {
		item.Image = x
	}
	if x, ok := m["image_pull_policy"].(string); ok {
		item.ImagePullPolicy = api.PullPolicy(x)
	}
	if x, ok := m["termination_message_path"].(string); ok {
		item.TerminationMessagePath = x
	}
	if x, ok := m["working_dir"].(string); ok {
		item.WorkingDir = x
	}
	if x, ok := m["command"].([]interface{}); ok {
		for _, y := range x {
			item.Command = append(item.Command, y.(string))
		}
	}
	if x, ok := m["args"].([]interface{}); ok {
		for _, y := range x {
			item.Args = append(item.Args, y.(string))
		}
	}
	if x, ok := m["port"].([]interface{}); ok {
		for _, y := range x {
			ref := api.ContainerPort{}
			writeContainerPort(y.(map[string]interface{}), &ref)
			item.Ports = append(item.Ports, ref)
		}
	}
	if x, ok := m["env"].([]interface{}); ok {
		for _, y := range x {
			ref := api.EnvVar{}
			writeEnvVar(y.(map[string]interface{}), &ref)
			item.Env = append(item.Env, ref)
		}
	}
	if x, ok := m["volume_mount"].([]interface{}); ok {
		for _, y := range x {
			ref := api.VolumeMount{}
			writeVolumeMount(y.(map[string]interface{}), &ref)
			item.VolumeMounts = append(item.VolumeMounts, ref)
		}
	}
	if n, ok := extractSingleMap(m["liveness_probe"]); ok {
		item.LivenessProbe = &api.Probe{}
		writeProbe(n, item.LivenessProbe)
	}
	if n, ok := extractSingleMap(m["readiness_probe"]); ok {
		item.ReadinessProbe = &api.Probe{}
		writeProbe(n, item.ReadinessProbe)
	}
	if n, ok := extractSingleMap(m["resources"]); ok {
		if o, ok := extractSingleMap(n["limits"]); ok {
			item.Resources.Limits = make(api.ResourceList)
			if x, ok := o["cpu"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Limits[api.ResourceCPU] = *q
			}
			if x, ok := o["memory"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Limits[api.ResourceMemory] = *q
			}
		}
		if o, ok := extractSingleMap(n["requests"]); ok {
			item.Resources.Requests = make(api.ResourceList)
			if x, ok := o["cpu"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Requests[api.ResourceCPU] = *q
			}
			if x, ok := o["memory"].(string); ok && x != "" {
				q, err := resource.ParseQuantity(x)
				if err != nil {
					return fmt.Errorf("%s for %q", err, x)
				}
				item.Resources.Requests[api.ResourceMemory] = *q
			}
		}
	}
	return nil
}